cpumask: powerpc converted to new cpumask operators.

This isn't a complete conversion, but it converts all the cases where a
cpumask operation could be operating on undefined bits (those at or above
nr_cpu_ids).
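
For illustration (a minimal before/after sketch of the operator change,
mirroring the hunks below rather than adding anything new):

	/* old: cpus_weight() walks all NR_CPUS bits of the cpumask_t */
	return cpus_weight(cpu_possible_map);

	/* new: cpumask_weight() takes a const struct cpumask * and stops
	 * at nr_cpumask_bits (nr_cpu_ids once CONFIG_CPUMASK_OFFSTACK is
	 * enabled), so undefined bits are never touched */
	return cpumask_weight(cpu_possible_mask);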

In several places, NR_CPUS is converted to nr_cpu_ids.  This tighter
bound is generally preferred, and converting now eases a future move to
CONFIG_CPUMASK_OFFSTACK on powerpc.
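
For example, the kick_cpu sanity checks change from

	BUG_ON(nr < 0 || nr >= NR_CPUS);

to

	/* nr_cpu_ids is one more than the highest possible cpu id,
	 * so it is always <= NR_CPUS */
	BUG_ON(nr < 0 || nr >= nr_cpu_ids);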

I couldn't help converting a few unnecessary temporary cpumasks to
pointers (cpumask_of_cpu -> cpumask_of).
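
i.e. instead of building a temporary mask on the stack:

	cpumask_t tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

the code keeps the pointer returned by cpumask_of() (this mirrors the
hash_page() hunk below):

	const struct cpumask *tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(&mm->cpu_vm_mask, tmp))
		local = 1;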

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/powerpc/include/asm/cputhreads.h           |    6 +++---
 arch/powerpc/kernel/crash.c                     |    8 ++++----
 arch/powerpc/kernel/irq.c                       |    4 ++--
 arch/powerpc/kernel/smp.c                       |    2 +-
 arch/powerpc/mm/hash_utils_64.c                 |   12 ++++++------
 arch/powerpc/mm/mmu_context_nohash.c            |    2 +-
 arch/powerpc/mm/pgtable.c                       |    4 ++--
 arch/powerpc/mm/tlb_hash64.c                    |    6 +++---
 arch/powerpc/platforms/86xx/mpc86xx_smp.c       |    4 ++--
 arch/powerpc/platforms/cell/beat_smp.c          |    4 ++--
 arch/powerpc/platforms/cell/cpufreq_spudemand.c |    4 ++--
 arch/powerpc/platforms/cell/smp.c               |    4 ++--
 arch/powerpc/platforms/iseries/smp.c            |    4 ++--
 arch/powerpc/platforms/pseries/hotplug-cpu.c    |   20 ++++++++++----------
 arch/powerpc/platforms/pseries/rtasd.c          |    4 ++--
 arch/powerpc/platforms/pseries/smp.c            |    2 +-
 arch/powerpc/platforms/pseries/xics.c           |   10 +++++-----
 arch/powerpc/sysdev/mpic.c                      |   16 ++++++----------
 arch/powerpc/xmon/xmon.c                        |    4 ++--
 19 files changed, 58 insertions(+), 62 deletions(-)

diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -35,9 +35,9 @@ static inline cpumask_t cpu_thread_mask_
 
 	res = CPU_MASK_NONE;
 	for (i = 0; i < nr_cpu_ids; i += threads_per_core) {
-		cpus_shift_left(tmp, threads_core_mask, i);
-		if (cpus_intersects(threads, tmp))
-			cpu_set(i, res);
+		cpumask_shift_left(&tmp, &threads_core_mask, i);
+		if (cpumask_intersects(&threads, &tmp))
+			cpumask_set_cpu(i, &res);
 	}
 	return res;
 }
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -132,7 +132,7 @@ static void crash_kexec_prepare_cpus(int
 	 */
 	printk(KERN_EMERG "Sending IPI to other cpus...\n");
 	msecs = 10000;
-	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
 		cpu_relax();
 		mdelay(1);
 	}
@@ -144,13 +144,13 @@ static void crash_kexec_prepare_cpus(int
 	 * user to do soft reset such that we get all.
 	 * Soft-reset will be used until better mechanism is implemented.
 	 */
-	if (cpus_weight(cpus_in_crash) < ncpus) {
+	if (cpumask_weight(&cpus_in_crash) < ncpus) {
 		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
-			ncpus - cpus_weight(cpus_in_crash));
+			ncpus - cpumask_weight(&cpus_in_crash));
 		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
 		cpus_in_sr = CPU_MASK_NONE;
 		atomic_set(&enter_on_soft_reset, 0);
-		while (cpus_weight(cpus_in_crash) < ncpus)
+		while (cpumask_weight(&cpus_in_crash) < ncpus)
 			cpu_relax();
 	}
 	/*
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -231,8 +231,8 @@ void fixup_irqs(cpumask_t map)
 		if (irq_desc[irq].status & IRQ_PER_CPU)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, map);
-		if (any_online_cpu(mask) >= nr_cpu_ids) {
+		cpumask_and(&mask, &irq_desc[irq].affinity, &map);
+		if (cpumask_any_and(&mask, cpu_online_mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
 		}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -77,7 +77,7 @@ static void (*crash_ipi_function_ptr)(st
 #ifdef CONFIG_PPC64
 void __devinit smp_generic_kick_cpu(int nr)
 {
-	BUG_ON(nr < 0 || nr >= NR_CPUS);
+	BUG_ON(nr < 0 || nr >= nr_cpu_ids);
 
 	/*
 	 * The processor is currently spinning, waiting for the
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -858,7 +858,7 @@ int hash_page(unsigned long ea, unsigned
 	unsigned long vsid;
 	struct mm_struct *mm;
 	pte_t *ptep;
-	cpumask_t tmp;
+	const struct cpumask *tmp;
 	int rc, user_region = 0, local = 0;
 	int psize, ssize;
 
@@ -906,8 +906,8 @@ int hash_page(unsigned long ea, unsigned
 		return 1;
 
 	/* Check CPU locality */
-	tmp = cpumask_of_cpu(smp_processor_id());
-	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
+	tmp = cpumask_of(smp_processor_id());
+	if (user_region && cpumask_equal(&mm->cpu_vm_mask, tmp))
 		local = 1;
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -1023,7 +1023,7 @@ void hash_preload(struct mm_struct *mm, 
 	unsigned long vsid;
 	void *pgdir;
 	pte_t *ptep;
-	cpumask_t mask;
+	const struct cpumask *mask;
 	unsigned long flags;
 	int local = 0;
 	int ssize;
@@ -1066,8 +1066,8 @@ void hash_preload(struct mm_struct *mm, 
 	local_irq_save(flags);
 
 	/* Is that local to this CPU ? */
-	mask = cpumask_of_cpu(smp_processor_id());
-	if (cpus_equal(mm->cpu_vm_mask, mask))
+	mask = cpumask_of(smp_processor_id());
+	if (cpumask_equal(&mm->cpu_vm_mask, mask))
 		local = 1;
 
 	/* Hash it in */
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -97,7 +97,7 @@ static unsigned int steal_context_smp(un
 		mm->context.id = MMU_NO_CONTEXT;
 
 		/* Mark it stale on all CPUs that used this mm */
-		for_each_cpu_mask_nr(cpu, mm->cpu_vm_mask)
+		for_each_cpu(cpu, &mm->cpu_vm_mask)
 			__set_bit(id, stale_map[cpu]);
 		return id;
 	}
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -81,11 +81,11 @@ void pgtable_free_tlb(struct mmu_gather 
 void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 {
 	/* This is safe since tlb_gather_mmu has disabled preemption */
-        cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
+	const struct cpumask *local_cpumask = cpumask_of(smp_processor_id());
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+	    cpumask_equal(&tlb->mm->cpu_vm_mask, local_cpumask)) {
 		pgtable_free(pgf);
 		return;
 	}
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -139,12 +139,12 @@ void hpte_need_flush(struct mm_struct *m
  */
 void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 {
-	cpumask_t tmp;
 	int i, local = 0;
+	const struct cpumask *tmp;
 
 	i = batch->index;
-	tmp = cpumask_of_cpu(smp_processor_id());
-	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
+	tmp = cpumask_of(smp_processor_id());
+	if (cpumask_equal(&batch->mm->cpu_vm_mask, tmp))
 		local = 1;
 	if (i == 1)
 		flush_hash_page(batch->vaddr[0], batch->pte[0],
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -37,7 +37,7 @@ smp_86xx_release_core(int nr)
 	__be32 __iomem *mcm_vaddr;
 	unsigned long pcr;
 
-	if (nr < 0 || nr >= NR_CPUS)
+	if (nr < 0 || nr >= nr_cpu_ids)
 		return;
 
 	/*
@@ -59,7 +59,7 @@ smp_86xx_kick_cpu(int nr)
 	int n = 0;
 	unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100);
 
-	if (nr < 0 || nr >= NR_CPUS)
+	if (nr < 0 || nr >= nr_cpu_ids)
 		return;
 
 	pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr);
diff --git a/arch/powerpc/platforms/cell/beat_smp.c b/arch/powerpc/platforms/cell/beat_smp.c
--- a/arch/powerpc/platforms/cell/beat_smp.c
+++ b/arch/powerpc/platforms/cell/beat_smp.c
@@ -85,7 +85,7 @@ static void smp_beatic_message_pass(int 
 
 static int __init smp_beatic_probe(void)
 {
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 static void __devinit smp_beatic_setup_cpu(int cpu)
@@ -95,7 +95,7 @@ static void __devinit smp_beatic_setup_c
 
 static void __devinit smp_celleb_kick_cpu(int nr)
 {
-	BUG_ON(nr < 0 || nr >= NR_CPUS);
+	BUG_ON(nr < 0 || nr >= nr_cpu_ids);
 
 	if (!smp_startup_cpu(nr))
 		return;
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -110,7 +110,7 @@ static int spu_gov_govern(struct cpufreq
 		}
 
 		/* initialize spu_gov_info for all affected cpus */
-		for_each_cpu_mask(i, policy->cpus) {
+		for_each_cpu(i, &policy->cpus) {
 			affected_info = &per_cpu(spu_gov_info, i);
 			affected_info->policy = policy;
 		}
@@ -127,7 +127,7 @@ static int spu_gov_govern(struct cpufreq
 		spu_gov_cancel_work(info);
 
 		/* clean spu_gov_info for all affected cpus */
-		for_each_cpu_mask (i, policy->cpus) {
+		for_each_cpu(i, &policy->cpus) {
 			info = &per_cpu(spu_gov_info, i);
 			info->policy = NULL;
 		}
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -126,7 +126,7 @@ static int __init smp_iic_probe(void)
 {
 	iic_request_IPIs();
 
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 static void __devinit smp_cell_setup_cpu(int cpu)
@@ -167,7 +167,7 @@ static void __devinit cell_take_timebase
 
 static void __devinit smp_cell_kick_cpu(int nr)
 {
-	BUG_ON(nr < 0 || nr >= NR_CPUS);
+	BUG_ON(nr < 0 || nr >= nr_cpu_ids);
 
 	if (!smp_startup_cpu(nr))
 		return;
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -83,12 +83,12 @@ static void smp_iSeries_message_pass(int
 
 static int smp_iSeries_probe(void)
 {
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 static void smp_iSeries_kick_cpu(int nr)
 {
-	BUG_ON((nr < 0) || (nr >= NR_CPUS));
+	BUG_ON((nr < 0) || (nr >= nr_cpu_ids));
 
 	/* Verify that our partition has a processor nr */
 	if (lppaca[nr].dyn_proc_status >= 2)
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -99,7 +99,7 @@ static int pseries_cpu_disable(void)
 
 	/*fix boot_cpuid here*/
 	if (cpu == boot_cpuid)
-		boot_cpuid = any_online_cpu(cpu_online_map);
+		boot_cpuid = cpumask_any(cpu_online_mask);
 
 	/* FIXME: abstract this to not be platform specific later on */
 	xics_migrate_irqs_away();
@@ -155,35 +155,35 @@ static int pseries_add_processor(struct 
 
 	cpu_maps_update_begin();
 
-	BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
 
 	/* Get a bitmap of unoccupied slots. */
-	cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
-	if (cpus_empty(candidate_map)) {
+	cpumask_xor(&candidate_map, cpu_possible_mask, cpu_present_mask);
+	if (cpumask_empty(&candidate_map)) {
 		/* If we get here, it most likely means that NR_CPUS is
 		 * less than the partition's max processors setting.
 		 */
 		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
 		       " supports %d logical cpus.\n", np->full_name,
-		       cpus_weight(cpu_possible_map));
+		       cpumask_weight(cpu_possible_mask));
 		goto out_unlock;
 	}
 
-	while (!cpus_empty(tmp))
-		if (cpus_subset(tmp, candidate_map))
+	while (!cpumask_empty(&tmp))
+		if (cpumask_subset(&tmp, &candidate_map))
 			/* Found a range where we can insert the new cpu(s) */
 			break;
 		else
-			cpus_shift_left(tmp, tmp, nthreads);
+			cpumask_shift_left(&tmp, &tmp, nthreads);
 
-	if (cpus_empty(tmp)) {
+	if (cpumask_empty(&tmp)) {
 		printk(KERN_ERR "Unable to find space in cpu_present_map for"
 		       " processor %s with %d thread(s)\n", np->name,
 		       nthreads);
 		goto out_unlock;
 	}
 
-	for_each_cpu_mask(cpu, tmp) {
+	for_each_cpu(cpu, &tmp) {
 		BUG_ON(cpu_isset(cpu, cpu_present_map));
 		cpu_set(cpu, cpu_present_map);
 		set_hard_smp_processor_id(cpu, *intserv++);
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -388,7 +388,7 @@ static void do_event_scan_all_cpus(long 
 	int cpu;
 
 	get_online_cpus();
-	cpu = first_cpu(cpu_online_map);
+	cpu = cpumask_first(cpu_online_mask);
 	for (;;) {
 		set_cpus_allowed(current, cpumask_of_cpu(cpu));
 		do_event_scan();
@@ -399,7 +399,7 @@ static void do_event_scan_all_cpus(long 
 		msleep_interruptible(delay);
 		get_online_cpus();
 
-		cpu = next_cpu(cpu, cpu_online_map);
+		cpu = cpumask_next(cpu, cpu_online_mask);
 		if (cpu >= nr_cpu_ids)
 			break;
 	}
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -145,7 +145,7 @@ static void __devinit pSeries_take_timeb
 
 static void __devinit smp_pSeries_kick_cpu(int nr)
 {
-	BUG_ON(nr < 0 || nr >= NR_CPUS);
+	BUG_ON(nr < 0 || nr >= nr_cpu_ids);
 
 	if (!smp_startup_cpu(nr))
 		return;
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -159,10 +159,10 @@ static int get_irq_server(unsigned int v
 	if (!distribute_irqs)
 		return default_server;
 
-	if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
-		cpus_and(tmp, cpu_online_map, cpumask);
+	if (!cpumask_equal(&cpumask, cpu_all_mask)) {
+		cpumask_and(&tmp, cpu_online_mask, &cpumask);
 
-		server = first_cpu(tmp);
+		server = cpumask_first(&tmp);
 
 		if (server < nr_cpu_ids)
 			return get_hard_smp_processor_id(server);
@@ -171,7 +171,7 @@ static int get_irq_server(unsigned int v
 			return -1;
 	}
 
-	if (cpus_equal(cpu_online_map, cpu_present_map))
+	if (cpumask_equal(cpu_online_mask, cpu_present_mask))
 		return default_distrib_server;
 
 	return default_server;
@@ -566,7 +566,7 @@ int __init smp_xics_probe(void)
 {
 	xics_request_ipi();
 
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 #endif /* CONFIG_SMP */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -569,7 +569,7 @@ static int irq_choose_cpu(unsigned int v
 	cpumask_t mask = irq_desc[virt_irq].affinity;
 	int cpuid;
 
-	if (cpus_equal(mask, CPU_MASK_ALL)) {
+	if (cpumask_equal(&mask, cpu_all_mask)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);
 		unsigned long flags;
@@ -579,25 +579,21 @@ static int irq_choose_cpu(unsigned int v
 		spin_lock_irqsave(&irq_rover_lock, flags);
 
 		while (!cpu_online(irq_rover)) {
-			if (++irq_rover >= NR_CPUS)
+			if (++irq_rover >= nr_cpu_ids)
 				irq_rover = 0;
 		}
 		cpuid = irq_rover;
 		do {
-			if (++irq_rover >= NR_CPUS)
+			if (++irq_rover >= nr_cpu_ids)
 				irq_rover = 0;
 		} while (!cpu_online(irq_rover));
 
 		spin_unlock_irqrestore(&irq_rover_lock, flags);
 	} else {
-		cpumask_t tmp;
-
-		cpus_and(tmp, cpu_online_map, mask);
-
-		if (cpus_empty(tmp))
+		if (!cpumask_intersects(cpu_online_mask, &mask))
 			goto do_round_robin;
 
-		cpuid = first_cpu(tmp);
+		cpuid = cpumask_first_and(cpu_online_mask, &mask);
 	}
 
 	return get_hard_smp_processor_id(cpuid);
@@ -1580,7 +1576,7 @@ int __init smp_mpic_probe(void)
 
 	DBG("smp_mpic_probe()...\n");
 
-	nr_cpus = cpus_weight(cpu_possible_map);
+	nr_cpus = cpumask_weight(cpu_possible_mask);
 
 	DBG("nr_cpus: %d\n", nr_cpus);
 
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -329,7 +329,7 @@ static void release_output_lock(void)
 
 int cpus_are_in_xmon(void)
 {
-	return !cpus_empty(cpus_in_xmon);
+	return !cpumask_empty(&cpus_in_xmon);
 }
 #endif
 
@@ -425,7 +425,7 @@ static int xmon_core(struct pt_regs *reg
 			smp_send_debugger_break(MSG_ALL_BUT_SELF);
 			/* wait for other cpus to come in */
 			for (timeout = 100000000; timeout != 0; --timeout) {
-				if (cpus_weight(cpus_in_xmon) >= ncpus)
+				if (cpumask_weight(&cpus_in_xmon) >= ncpus)
 					break;
 				barrier();
 			}
