cpumask: powerpc converted to new cpumask operators.

This isn't a complete conversion, but it converts all the cases where
a cpumask operation could touch undefined bits — i.e. bits at or above
nr_cpu_ids, which the new cpumask operators never read or write (the
old operators, and bounds checks against NR_CPUS, could).

** UNTESTED EXAMPLE **

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/powerpc/include/asm/cputhreads.h        |    6 +++---
 arch/powerpc/kernel/crash.c                  |    8 ++++----
 arch/powerpc/kernel/smp.c                    |    2 +-
 arch/powerpc/platforms/86xx/mpc86xx_smp.c    |    4 ++--
 arch/powerpc/platforms/cell/beat_smp.c       |    4 ++--
 arch/powerpc/platforms/cell/smp.c            |    4 ++--
 arch/powerpc/platforms/iseries/smp.c         |    4 ++--
 arch/powerpc/platforms/pseries/hotplug-cpu.c |   20 ++++++++++----------
 arch/powerpc/platforms/pseries/rtasd.c       |    4 ++--
 arch/powerpc/platforms/pseries/smp.c         |    2 +-
 arch/powerpc/platforms/pseries/xics.c        |   10 +++++-----
 arch/powerpc/sysdev/mpic.c                   |   14 +++++---------
 arch/powerpc/xmon/xmon.c                     |    4 ++--
 13 files changed, 41 insertions(+), 45 deletions(-)

diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -35,9 +35,9 @@ static inline cpumask_t cpu_thread_mask_
 
 	res = CPU_MASK_NONE;
 	for (i = 0; i < nr_cpu_ids; i += threads_per_core) {
-		cpus_shift_left(tmp, threads_core_mask, i);
-		if (cpus_intersects(threads, tmp))
-			cpu_set(i, res);
+		cpumask_shift_left(&tmp, &threads_core_mask, i);
+		if (cpumask_intersects(&threads, &tmp))
+			cpumask_set_cpu(i, &res);
 	}
 	return res;
 }
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -132,7 +132,7 @@ static void crash_kexec_prepare_cpus(int
 	 */
 	printk(KERN_EMERG "Sending IPI to other cpus...\n");
 	msecs = 10000;
-	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
 		cpu_relax();
 		mdelay(1);
 	}
@@ -144,13 +144,13 @@ static void crash_kexec_prepare_cpus(int
 	 * user to do soft reset such that we get all.
 	 * Soft-reset will be used until better mechanism is implemented.
 	 */
-	if (cpus_weight(cpus_in_crash) < ncpus) {
+	if (cpumask_weight(&cpus_in_crash) < ncpus) {
 		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
-			ncpus - cpus_weight(cpus_in_crash));
+			ncpus - cpumask_weight(&cpus_in_crash));
 		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
 		cpus_in_sr = CPU_MASK_NONE;
 		atomic_set(&enter_on_soft_reset, 0);
-		while (cpus_weight(cpus_in_crash) < ncpus)
+		while (cpumask_weight(&cpus_in_crash) < ncpus)
 			cpu_relax();
 	}
 	/*
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -77,7 +77,7 @@ static void (*crash_ipi_function_ptr)(st
 #ifdef CONFIG_PPC64
 void __devinit smp_generic_kick_cpu(int nr)
 {
-	BUG_ON(nr < 0 || nr >= NR_CPUS);
+	BUG_ON(nr < 0 || nr >= nr_cpu_ids);
 
 	/*
 	 * The processor is currently spinning, waiting for the
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -37,7 +37,7 @@ smp_86xx_release_core(int nr)
 	__be32 __iomem *mcm_vaddr;
 	unsigned long pcr;
 
-	if (nr < 0 || nr >= NR_CPUS)
+	if (nr < 0 || nr >= nr_cpu_ids)
 		return;
 
 	/*
@@ -59,7 +59,7 @@ smp_86xx_kick_cpu(int nr)
 	int n = 0;
 	unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100);
 
-	if (nr < 0 || nr >= NR_CPUS)
+	if (nr < 0 || nr >= nr_cpu_ids)
 		return;
 
 	pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr);
diff --git a/arch/powerpc/platforms/cell/beat_smp.c b/arch/powerpc/platforms/cell/beat_smp.c
--- a/arch/powerpc/platforms/cell/beat_smp.c
+++ b/arch/powerpc/platforms/cell/beat_smp.c
@@ -85,7 +85,7 @@ static void smp_beatic_message_pass(int 
 
 static int __init smp_beatic_probe(void)
 {
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 static void __devinit smp_beatic_setup_cpu(int cpu)
@@ -95,7 +95,7 @@ static void __devinit smp_beatic_setup_c
 
 static void __devinit smp_celleb_kick_cpu(int nr)
 {
-	BUG_ON(nr < 0 || nr >= NR_CPUS);
+	BUG_ON(nr < 0 || nr >= nr_cpu_ids);
 
 	if (!smp_startup_cpu(nr))
 		return;
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -126,7 +126,7 @@ static int __init smp_iic_probe(void)
 {
 	iic_request_IPIs();
 
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 static void __devinit smp_cell_setup_cpu(int cpu)
@@ -167,7 +167,7 @@ static void __devinit cell_take_timebase
 
 static void __devinit smp_cell_kick_cpu(int nr)
 {
-	BUG_ON(nr < 0 || nr >= NR_CPUS);
+	BUG_ON(nr < 0 || nr >= nr_cpu_ids);
 
 	if (!smp_startup_cpu(nr))
 		return;
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -83,12 +83,12 @@ static void smp_iSeries_message_pass(int
 
 static int smp_iSeries_probe(void)
 {
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 static void smp_iSeries_kick_cpu(int nr)
 {
-	BUG_ON((nr < 0) || (nr >= NR_CPUS));
+	BUG_ON((nr < 0) || (nr >= nr_cpu_ids));
 
 	/* Verify that our partition has a processor nr */
 	if (lppaca[nr].dyn_proc_status >= 2)
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -99,7 +99,7 @@ static int pseries_cpu_disable(void)
 
 	/*fix boot_cpuid here*/
 	if (cpu == boot_cpuid)
-		boot_cpuid = any_online_cpu(cpu_online_map);
+		boot_cpuid = cpumask_any(cpu_online_mask);
 
 	/* FIXME: abstract this to not be platform specific later on */
 	xics_migrate_irqs_away();
@@ -155,35 +155,35 @@ static int pseries_add_processor(struct 
 
 	cpu_maps_update_begin();
 
-	BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
 
 	/* Get a bitmap of unoccupied slots. */
-	cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
-	if (cpus_empty(candidate_map)) {
+	cpumask_xor(&candidate_map, cpu_possible_mask, cpu_present_mask);
+	if (cpumask_empty(&candidate_map)) {
 		/* If we get here, it most likely means that NR_CPUS is
 		 * less than the partition's max processors setting.
 		 */
 		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
 		       " supports %d logical cpus.\n", np->full_name,
-		       cpus_weight(cpu_possible_map));
+		       cpumask_weight(cpu_possible_mask));
 		goto out_unlock;
 	}
 
-	while (!cpus_empty(tmp))
-		if (cpus_subset(tmp, candidate_map))
+	while (!cpumask_empty(&tmp))
+		if (cpumask_subset(&tmp, &candidate_map))
 			/* Found a range where we can insert the new cpu(s) */
 			break;
 		else
-			cpus_shift_left(tmp, tmp, nthreads);
+			cpumask_shift_left(&tmp, &tmp, nthreads);
 
-	if (cpus_empty(tmp)) {
+	if (cpumask_empty(&tmp)) {
 		printk(KERN_ERR "Unable to find space in cpu_present_map for"
 		       " processor %s with %d thread(s)\n", np->name,
 		       nthreads);
 		goto out_unlock;
 	}
 
-	for_each_cpu_mask(cpu, tmp) {
+	for_each_cpu(cpu, &tmp) {
 		BUG_ON(cpu_isset(cpu, cpu_present_map));
 		cpu_set(cpu, cpu_present_map);
 		set_hard_smp_processor_id(cpu, *intserv++);
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -392,7 +392,7 @@ static void do_event_scan_all_cpus(long 
 	int cpu;
 
 	get_online_cpus();
-	cpu = first_cpu(cpu_online_map);
+	cpu = cpumask_first(cpu_online_mask);
 	for (;;) {
 		set_cpus_allowed(current, cpumask_of_cpu(cpu));
 		do_event_scan();
@@ -403,7 +403,7 @@ static void do_event_scan_all_cpus(long 
 		msleep_interruptible(delay);
 		get_online_cpus();
 
-		cpu = next_cpu(cpu, cpu_online_map);
+		cpu = cpumask_next(cpu, cpu_online_mask);
 		if (cpu >= nr_cpu_ids)
 			break;
 	}
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -145,7 +145,7 @@ static void __devinit pSeries_take_timeb
 
 static void __devinit smp_pSeries_kick_cpu(int nr)
 {
-	BUG_ON(nr < 0 || nr >= NR_CPUS);
+	BUG_ON(nr < 0 || nr >= nr_cpu_ids);
 
 	if (!smp_startup_cpu(nr))
 		return;
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -160,10 +160,10 @@ static int get_irq_server(unsigned int v
 	if (!distribute_irqs)
 		return default_server;
 
-	if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
-		cpus_and(tmp, cpu_online_map, cpumask);
+	if (!cpumask_equal(&cpumask, cpu_all_mask)) {
+		cpumask_and(&tmp, cpu_online_mask, &cpumask);
 
-		server = first_cpu(tmp);
+		server = cpumask_first(&tmp);
 
 		if (server < nr_cpu_ids)
 			return get_hard_smp_processor_id(server);
@@ -172,7 +172,7 @@ static int get_irq_server(unsigned int v
 			return -1;
 	}
 
-	if (cpus_equal(cpu_online_map, cpu_present_map))
+	if (cpumask_equal(cpu_online_mask, cpu_present_mask))
 		return default_distrib_server;
 
 	return default_server;
@@ -567,7 +567,7 @@ int __init smp_xics_probe(void)
 {
 	xics_request_ipi();
 
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 #endif /* CONFIG_SMP */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -580,25 +580,21 @@ static int irq_choose_cpu(unsigned int v
 		spin_lock_irqsave(&irq_rover_lock, flags);
 
 		while (!cpu_online(irq_rover)) {
-			if (++irq_rover >= NR_CPUS)
+			if (++irq_rover >= nr_cpu_ids)
 				irq_rover = 0;
 		}
 		cpuid = irq_rover;
 		do {
-			if (++irq_rover >= NR_CPUS)
+			if (++irq_rover >= nr_cpu_ids)
 				irq_rover = 0;
 		} while (!cpu_online(irq_rover));
 
 		spin_unlock_irqrestore(&irq_rover_lock, flags);
 	} else {
-		cpumask_t tmp;
-
-		cpus_and(tmp, cpu_online_map, mask);
-
-		if (cpus_empty(tmp))
+		if (!cpumask_intersects(cpu_online_mask, &mask))
 			goto do_round_robin;
 
-		cpuid = first_cpu(tmp);
+		cpuid = cpumask_first_and(cpu_online_mask, &mask);
 	}
 
 	return get_hard_smp_processor_id(cpuid);
@@ -1581,7 +1577,7 @@ int __init smp_mpic_probe(void)
 
 	DBG("smp_mpic_probe()...\n");
 
-	nr_cpus = cpus_weight(cpu_possible_map);
+	nr_cpus = cpumask_weight(cpu_possible_mask);
 
 	DBG("nr_cpus: %d\n", nr_cpus);
 
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -329,7 +329,7 @@ static void release_output_lock(void)
 
 int cpus_are_in_xmon(void)
 {
-	return !cpus_empty(cpus_in_xmon);
+	return !cpumask_empty(&cpus_in_xmon);
 }
 #endif
 
@@ -425,7 +425,7 @@ static int xmon_core(struct pt_regs *reg
 			smp_send_debugger_break(MSG_ALL_BUT_SELF);
 			/* wait for other cpus to come in */
 			for (timeout = 100000000; timeout != 0; --timeout) {
-				if (cpus_weight(cpus_in_xmon) >= ncpus)
+				if (cpumask_weight(&cpus_in_xmon) >= ncpus)
 					break;
 				barrier();
 			}
