cpumask: convert alloc_percpu to new cpumask functions.

This patch will conflict with my cpualloc work (held by Tejun), so
it is kept as a separate patch that can easily be discarded.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 include/linux/percpu.h |   12 +++++++-----
 mm/allocpercpu.c       |   24 +++++++++++++-----------
 2 files changed, 20 insertions(+), 16 deletions(-)

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -81,14 +81,16 @@ struct percpu_data {
         (__typeof__(ptr))__p->ptrs[(cpu)];	          \
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
+extern void *__percpu_alloc_mask(size_t size, gfp_t gfp,
+				 const struct cpumask *mask);
 extern void percpu_free(void *__pdata);
 
 #else /* CONFIG_SMP */
 
 #define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp,
+						 const struct cpumask *mask)
 {
 	return kzalloc(size, gfp);
 }
@@ -103,12 +105,12 @@ static inline void percpu_free(void *__p
 #define percpu_alloc_mask(size, gfp, mask) \
 	__percpu_alloc_mask((size), (gfp), &(mask))
 
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
+#define percpu_alloc(size, gfp) __percpu_alloc_mask((size), (gfp), cpu_online_mask)
 
 /* (legacy) interface for use without CPU hotplug handling */
 
-#define __alloc_percpu(size)	percpu_alloc_mask((size), GFP_KERNEL, \
-						  cpu_possible_map)
+#define __alloc_percpu(size)	__percpu_alloc_mask((size), GFP_KERNEL, \
+						  cpu_possible_mask)
 #define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type))
 #define free_percpu(ptr)	percpu_free((ptr))
 #define per_cpu_ptr(ptr, cpu)	percpu_ptr((ptr), (cpu))
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -31,15 +31,12 @@ static void percpu_depopulate(void *__pd
  * @__pdata: per-cpu data to depopulate
  * @mask: depopulate per-cpu data for cpu's selected through mask bits
  */
-static void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
+static void __percpu_depopulate_mask(void *__pdata, const struct cpumask *mask)
 {
 	int cpu;
-	for_each_cpu_mask_nr(cpu, *mask)
+	for_each_cpu(cpu, mask)
 		percpu_depopulate(__pdata, cpu);
 }
-
-#define percpu_depopulate_mask(__pdata, mask) \
-	__percpu_depopulate_mask((__pdata), &(mask))
 
 /**
  * percpu_populate - populate per-cpu data for given cpu
@@ -82,16 +79,21 @@ static int __percpu_populate_mask(void *
 static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
 				  cpumask_t *mask)
 {
-	cpumask_t populated;
+	cpumask_var_t populated;
 	int cpu;
 
-	cpus_clear(populated);
-	for_each_cpu_mask_nr(cpu, *mask)
+	if (!alloc_cpumask_var(&populated, gfp))
+		return -ENOMEM;
+
+	cpumask_clear(populated);
+	for_each_cpu(cpu, mask)
 		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
-			__percpu_depopulate_mask(__pdata, &populated);
+			__percpu_depopulate_mask(__pdata, populated);
+			free_cpumask_var(populated);
 			return -ENOMEM;
 		} else
-			cpu_set(cpu, populated);
+			cpumask_set_cpu(cpu, populated);
+	free_cpumask_var(populated);
 	return 0;
 }
 
@@ -108,7 +110,7 @@ static int __percpu_populate_mask(void *
  * which is simplified by the percpu_alloc() wrapper.
  * Per-cpu objects are populated with zeroed buffers.
  */
-void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+void *__percpu_alloc_mask(size_t size, gfp_t gfp, const struct cpumask *mask)
 {
 	/*
 	 * We allocate whole cache lines to avoid false sharing
