cpumask: convert kernel mm functions

Convert kernel mm functions to use struct cpumask.

The __percpu_alloc_mask()/__percpu_populate_mask()/__percpu_depopulate_mask()
functions took a cpumask_t * hidden behind wrapper macros; drop the macros and
have the functions take a const struct cpumask * directly.
percpu_depopulate_mask() gains a limit argument so the populate failure path
can tear down exactly the cpus already populated without tracking them in an
on-stack cpumask.

pdflush's on-stack cpumask_t becomes a cpumask_var_t, slub's cpumask_t objects
become bitmaps accessed through to_cpumask(), and the remaining old cpu_*
operators and cpu_*_map references are replaced with their cpumask_* and
cpu_*_mask equivalents.
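
For illustration, a caller-visible sketch of the allocator change (struct
foo_stats and foo_alloc_stats() are made up for this example; only
percpu_alloc_mask(), cpu_online_mask and GFP_KERNEL come from the interface
touched here):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	struct foo_stats {
		unsigned long events;		/* example payload */
	};

	static struct foo_stats *foo_alloc_stats(void)
	{
		/*
		 * Old: percpu_alloc_mask(size, gfp, cpu_online_map) was a
		 * macro taking a cpumask_t by name and passing its address
		 * to __percpu_alloc_mask().
		 * New: percpu_alloc_mask() is the function itself and takes
		 * a const struct cpumask * directly.
		 */
		return percpu_alloc_mask(sizeof(struct foo_stats), GFP_KERNEL,
					 cpu_online_mask);
	}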

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
---
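Reviewer note: the slub.c hunks rely on the DECLARE_BITMAP()/to_cpumask()
idiom from the new cpumask API for masks that never live on the stack,
instead of a cpumask_t.  A minimal sketch of the idiom, outside this patch
(the demo_* names are made up for illustration):

	#include <linux/cpumask.h>

	/* A CONFIG_NR_CPUS-bit bitmap, viewed as a struct cpumask * via to_cpumask(). */
	static DECLARE_BITMAP(demo_init_once, CONFIG_NR_CPUS);

	/* Returns true the first time it is called for @cpu, false afterwards. */
	static bool demo_mark_cpu_once(int cpu)
	{
		if (cpumask_test_cpu(cpu, to_cpumask(demo_init_once)))
			return false;
		cpumask_set_cpu(cpu, to_cpumask(demo_init_once));
		return true;
	}
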
 include/linux/percpu.h |   14 +++++++-------
 mm/allocpercpu.c       |   37 +++++++++++++++++--------------------
 mm/pdflush.c           |   16 +++++++++++++---
 mm/slab.c              |    2 +-
 mm/slub.c              |   20 +++++++++++---------
 mm/vmscan.c            |    2 +-
 mm/vmstat.c            |    4 ++--
 7 files changed, 52 insertions(+), 43 deletions(-)

--- linux-2.6.28.orig/include/linux/percpu.h
+++ linux-2.6.28/include/linux/percpu.h
@@ -96,14 +96,16 @@ struct percpu_data {
         (__typeof__(ptr))__p->ptrs[(cpu)];	          \
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
+extern void *percpu_alloc_mask(size_t size, gfp_t gfp,
+			       const struct cpumask *mask);
 extern void percpu_free(void *__pdata);
 
 #else /* CONFIG_SMP */
 
 #define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static __always_inline void *percpu_alloc_mask(size_t size, gfp_t gfp,
+					       const struct cpumask *mask)
 {
 	return kzalloc(size, gfp);
 }
@@ -115,15 +117,13 @@ static inline void percpu_free(void *__p
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-	__percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
+#define percpu_alloc(size, gfp) \
+	percpu_alloc_mask((size), (gfp), cpu_online_mask)
 
 /* (legacy) interface for use without CPU hotplug handling */
 
 #define __alloc_percpu(size)	percpu_alloc_mask((size), GFP_KERNEL, \
-						  cpu_possible_map)
+						  cpu_possible_mask)
 #define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type))
 #define free_percpu(ptr)	percpu_free((ptr))
 #define per_cpu_ptr(ptr, cpu)	percpu_ptr((ptr), (cpu))
--- linux-2.6.28.orig/mm/allocpercpu.c
+++ linux-2.6.28/mm/allocpercpu.c
@@ -30,17 +30,19 @@ static void percpu_depopulate(void *__pd
  * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
  * @__pdata: per-cpu data to depopulate
  * @mask: depopulate per-cpu data for cpu's selected through mask bits
+ * @limit: only depopulate cpus < this limit.
  */
-static void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
+static void percpu_depopulate_mask(void *__pdata, const struct cpumask *mask,
+				   unsigned int limit)
 {
 	int cpu;
-	for_each_cpu(cpu, mask)
+	for_each_cpu(cpu, mask) {
+		if (cpu >= limit)
+			break;
 		percpu_depopulate(__pdata, cpu);
+	}
 }
 
-#define percpu_depopulate_mask(__pdata, mask) \
-	__percpu_depopulate_mask((__pdata), &(mask))
-
 /**
  * percpu_populate - populate per-cpu data for given cpu
  * @__pdata: per-cpu data to populate further
@@ -79,25 +81,20 @@ static void *percpu_populate(void *__pda
  *
  * Per-cpu objects are populated with zeroed buffers.
  */
-static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
-				  cpumask_t *mask)
+static int percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
+				const struct cpumask *mask)
 {
-	cpumask_t populated;
 	int cpu;
 
-	cpus_clear(populated);
-	for_each_cpu(cpu, mask)
+	for_each_cpu(cpu, mask) {
 		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
-			__percpu_depopulate_mask(__pdata, &populated);
+			percpu_depopulate_mask(__pdata, mask, cpu);
 			return -ENOMEM;
-		} else
-			cpu_set(cpu, populated);
+		}
+	}
 	return 0;
 }
 
-#define percpu_populate_mask(__pdata, size, gfp, mask) \
-	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
-
 /**
  * percpu_alloc_mask - initial setup of per-cpu data
  * @size: size of per-cpu object
@@ -108,7 +105,7 @@ static int __percpu_populate_mask(void *
  * which is simplified by the percpu_alloc() wrapper.
  * Per-cpu objects are populated with zeroed buffers.
  */
-void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+void *percpu_alloc_mask(size_t size, gfp_t gfp, const struct cpumask *mask)
 {
 	/*
 	 * We allocate whole cache lines to avoid false sharing
@@ -119,12 +116,12 @@ void *__percpu_alloc_mask(size_t size, g
 
 	if (unlikely(!pdata))
 		return NULL;
-	if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
+	if (likely(!percpu_populate_mask(__pdata, size, gfp, mask)))
 		return __pdata;
 	kfree(pdata);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
+EXPORT_SYMBOL_GPL(percpu_alloc_mask);
 
 /**
  * percpu_free - final cleanup of per-cpu data
@@ -137,7 +134,7 @@ void percpu_free(void *__pdata)
 {
 	if (unlikely(!__pdata))
 		return;
-	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
+	percpu_depopulate_mask(__pdata, cpu_possible_mask, nr_cpu_ids);
 	kfree(__percpu_disguise(__pdata));
 }
 EXPORT_SYMBOL_GPL(percpu_free);
--- linux-2.6.28.orig/mm/pdflush.c
+++ linux-2.6.28/mm/pdflush.c
@@ -172,7 +172,16 @@ static int __pdflush(struct pdflush_work
 static int pdflush(void *dummy)
 {
 	struct pdflush_work my_work;
-	cpumask_t cpus_allowed;
+	cpumask_var_t cpus_allowed;
+
+	/*
+	 * Since the caller doesn't even check kthread_run() worked, let's not
+	 * freak out too much if this fails.
+	 */
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
+		return 0;
+	}
 
 	/*
 	 * pdflush can spend a lot of time doing encryption via dm-crypt.  We
@@ -187,8 +196,9 @@ static int pdflush(void *dummy)
 	 * This is needed as pdflush's are dynamically created and destroyed.
 	 * The boottime pdflush's are easily placed w/o these 2 lines.
 	 */
-	cpuset_cpus_allowed(current, &cpus_allowed);
-	set_cpus_allowed_ptr(current, &cpus_allowed);
+	cpuset_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, cpus_allowed);
+	free_cpumask_var(cpus_allowed);
 
 	return __pdflush(&my_work);
 }
--- linux-2.6.28.orig/mm/slab.c
+++ linux-2.6.28/mm/slab.c
@@ -2080,7 +2080,7 @@ kmem_cache_create (const char *name, siz
 
 	/*
 	 * We use cache_chain_mutex to ensure a consistent view of
-	 * cpu_online_map as well.  Please see cpuup_callback
+	 * cpu_online_mask as well.  Please see cpuup_callback
 	 */
 	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
--- linux-2.6.28.orig/mm/slub.c
+++ linux-2.6.28/mm/slub.c
@@ -1972,7 +1972,7 @@ static DEFINE_PER_CPU(struct kmem_cache_
 				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
+static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
 
 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
 							int cpu, gfp_t flags)
@@ -2047,13 +2047,13 @@ static void init_alloc_cpu_cpu(int cpu)
 {
 	int i;
 
-	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
+	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
 		return;
 
 	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
 		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
 
-	cpu_set(cpu, kmem_cach_cpu_free_init_once);
+	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
 }
 
 static void __init init_alloc_cpu(void)
@@ -3446,7 +3446,7 @@ struct location {
 	long max_time;
 	long min_pid;
 	long max_pid;
-	cpumask_t cpus;
+	DECLARE_BITMAP(cpus, NR_CPUS);
 	nodemask_t nodes;
 };
 
@@ -3521,7 +3521,8 @@ static int add_location(struct loc_track
 				if (track->pid > l->max_pid)
 					l->max_pid = track->pid;
 
-				cpu_set(track->cpu, l->cpus);
+				cpumask_set_cpu(track->cpu,
+						to_cpumask(l->cpus));
 			}
 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
 			return 1;
@@ -3551,8 +3552,8 @@ static int add_location(struct loc_track
 	l->max_time = age;
 	l->min_pid = track->pid;
 	l->max_pid = track->pid;
-	cpus_clear(l->cpus);
-	cpu_set(track->cpu, l->cpus);
+	cpumask_clear(to_cpumask(l->cpus));
+	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
 	nodes_clear(l->nodes);
 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
 	return 1;
@@ -3633,11 +3634,12 @@ static int list_locations(struct kmem_ca
 			len += sprintf(buf + len, " pid=%ld",
 				l->min_pid);
 
-		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
+		if (num_online_cpus() > 1 &&
+				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					&l->cpus);
+						 to_cpumask(l->cpus));
 		}
 
 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
--- linux-2.6.28.orig/mm/vmscan.c
+++ linux-2.6.28/mm/vmscan.c
@@ -1689,7 +1689,7 @@ static int kswapd(void *p)
 	};
 	const struct cpumask *cpumask = node_to_cpumask(pgdat->node_id);
 
-	if (!cpus_empty(*cpumask))
+	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
--- linux-2.6.28.orig/mm/vmstat.c
+++ linux-2.6.28/mm/vmstat.c
@@ -20,7 +20,7 @@
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 EXPORT_PER_CPU_SYMBOL(vm_event_states);
 
-static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
+static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 {
 	int cpu;
 	int i;
@@ -43,7 +43,7 @@ static void sum_vm_events(unsigned long 
 void all_vm_events(unsigned long *ret)
 {
 	get_online_cpus();
-	sum_vm_events(ret, &cpu_online_map);
+	sum_vm_events(ret, cpu_online_mask);
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);
