cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits: powerpc
From: Rusty Russell <rusty@rustcorp.com.au>

Impact: cleanup, futureproof

In fact, all cpumask ops will only be valid (in general) for bit
numbers < nr_cpu_ids, so use nr_cpu_ids instead of NR_CPUS in
various places.

This is always safe: no cpu number can be >= nr_cpu_ids, and
nr_cpu_ids is initialized to NR_CPUS at boot and never raised above
it, so the new bound can only be tighter than the old one.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
---
 arch/powerpc/include/asm/cputhreads.h        |    2 +-
 arch/powerpc/kernel/irq.c                    |    2 +-
 arch/powerpc/kernel/machine_kexec_64.c       |    2 +-
 arch/powerpc/kernel/process.c                |    2 +-
 arch/powerpc/kernel/setup-common.c           |   10 +++++-----
 arch/powerpc/mm/numa.c                       |    4 ++--
 arch/powerpc/platforms/powermac/setup.c      |    2 +-
 arch/powerpc/platforms/powermac/smp.c        |    4 ++--
 arch/powerpc/platforms/pseries/hotplug-cpu.c |    2 +-
 arch/powerpc/platforms/pseries/rtasd.c       |    2 +-
 arch/powerpc/platforms/pseries/xics.c        |    2 +-
 arch/powerpc/xmon/xmon.c                     |    4 ++--
 12 files changed, 19 insertions(+), 19 deletions(-)
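
As a side note, not part of the patch itself: below is a minimal user-space
sketch of the sentinel pattern these conversions rely on.  MAX_IDS and nr_ids
are hypothetical stand-ins for NR_CPUS and nr_cpu_ids; the point is that
search helpers return the runtime bound when nothing is found, so callers
compare against that bound (">= nr_ids") rather than against the compile-time
maximum ("== MAX_IDS").

/*
 * Illustrative sketch only -- plain user-space C, not kernel code.
 * MAX_IDS models NR_CPUS (compile-time maximum), nr_ids models
 * nr_cpu_ids (runtime count, never larger than the maximum).
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_IDS 64			/* analogue of NR_CPUS */

static unsigned int nr_ids = 4;		/* analogue of nr_cpu_ids */
static bool online[MAX_IDS];		/* analogue of cpu_online_map */

/* analogue of first_cpu()/any_online_cpu(): returns nr_ids if none found */
static unsigned int find_first_online(void)
{
	unsigned int i;

	for (i = 0; i < nr_ids; i++)
		if (online[i])
			return i;
	return nr_ids;
}

int main(void)
{
	unsigned int id;

	online[2] = true;		/* mark one id "online" */
	id = find_first_online();

	if (id >= nr_ids)		/* not "== MAX_IDS" */
		printf("no online id\n");
	else
		printf("first online id: %u\n", id);
	return 0;
}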

--- linux-2.6.orig/arch/powerpc/include/asm/cputhreads.h
+++ linux-2.6/arch/powerpc/include/asm/cputhreads.h
@@ -34,7 +34,7 @@ static inline cpumask_t cpu_thread_mask_
 	int		i;
 
 	res = CPU_MASK_NONE;
-	for (i = 0; i < NR_CPUS; i += threads_per_core) {
+	for (i = 0; i < nr_cpu_ids; i += threads_per_core) {
 		cpus_shift_left(tmp, threads_core_mask, i);
 		if (cpus_intersects(threads, tmp))
 			cpu_set(i, res);
--- linux-2.6.orig/arch/powerpc/kernel/irq.c
+++ linux-2.6/arch/powerpc/kernel/irq.c
@@ -232,7 +232,7 @@ void fixup_irqs(cpumask_t map)
 			continue;
 
 		cpus_and(mask, irq_desc[irq].affinity, map);
-		if (any_online_cpu(mask) == NR_CPUS) {
+		if (any_online_cpu(mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
 		}
--- linux-2.6.orig/arch/powerpc/kernel/machine_kexec_64.c
+++ linux-2.6/arch/powerpc/kernel/machine_kexec_64.c
@@ -176,7 +176,7 @@ static void kexec_prepare_cpus(void)
 	my_cpu = get_cpu();
 
 	/* check the others cpus are now down (via paca hw cpu id == -1) */
-	for (i=0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		if (i == my_cpu)
 			continue;
 
--- linux-2.6.orig/arch/powerpc/kernel/process.c
+++ linux-2.6/arch/powerpc/kernel/process.c
@@ -941,7 +941,7 @@ static inline int valid_irq_stack(unsign
 	 * Avoid crashing if the stack has overflowed and corrupted
 	 * task_cpu(p), which is in the thread_info struct.
 	 */
-	if (cpu < NR_CPUS && cpu_possible(cpu)) {
+	if (cpu < nr_cpu_ids && cpu_possible(cpu)) {
 		stack_page = (unsigned long) hardirq_ctx[cpu];
 		if (sp >= stack_page + sizeof(struct thread_struct)
 		    && sp <= stack_page + THREAD_SIZE - nbytes)
--- linux-2.6.orig/arch/powerpc/kernel/setup-common.c
+++ linux-2.6/arch/powerpc/kernel/setup-common.c
@@ -167,7 +167,7 @@ static int show_cpuinfo(struct seq_file 
 	unsigned short maj;
 	unsigned short min;
 
-	if (cpu_id == NR_CPUS) {
+	if (cpu_id == nr_cpu_ids) {
 		struct device_node *root;
 		const char *model = NULL;
 #if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
@@ -203,7 +203,7 @@ static int show_cpuinfo(struct seq_file 
 	/* We only show online cpus: disable preempt (overzealous, I
 	 * knew) to prevent cpu going down. */
 	preempt_disable();
-	if (!cpu_online(cpu_id)) {
+	if (cpu_id >= nr_cpu_ids || !cpu_online(cpu_id)) {
 		preempt_enable();
 		return 0;
 	}
@@ -314,7 +314,7 @@ static void *c_start(struct seq_file *m,
 {
 	unsigned long i = *pos;
 
-	return i <= NR_CPUS ? (void *)(i + 1) : NULL;
+	return i <= nr_cpu_ids ? (void *)(i + 1) : NULL;
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
@@ -409,7 +409,7 @@ void __init smp_setup_cpu_maps(void)
 
 	DBG("smp_setup_cpu_maps()\n");
 
-	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
+	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) {
 		const int *intserv;
 		int j, len;
 
@@ -428,7 +428,7 @@ void __init smp_setup_cpu_maps(void)
 				intserv = &cpu;	/* assume logical == phys */
 		}
 
-		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
+		for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
 			DBG("    thread %d -> cpu %d (hard id %d)\n",
 			    j, cpu, intserv[j]);
 			cpu_set(cpu, cpu_present_map);
--- linux-2.6.orig/arch/powerpc/mm/numa.c
+++ linux-2.6/arch/powerpc/mm/numa.c
@@ -765,7 +765,7 @@ void __init dump_numa_cpu_topology(void)
 		 * If we used a CPU iterator here we would miss printing
 		 * the holes in the cpumap.
 		 */
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
 			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
 				if (count == 0)
 					printk(" %u", cpu);
@@ -778,7 +778,7 @@ void __init dump_numa_cpu_topology(void)
 		}
 
 		if (count > 1)
-			printk("-%u", NR_CPUS - 1);
+			printk("-%u", nr_cpu_ids - 1);
 		printk("\n");
 	}
 }
--- linux-2.6.orig/arch/powerpc/platforms/powermac/setup.c
+++ linux-2.6/arch/powerpc/platforms/powermac/setup.c
@@ -365,7 +365,7 @@ static void __init pmac_setup_arch(void)
 		 */
 		int cpu;
 
-		for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
+		for (cpu = 1; cpu < 4 && cpu < nr_cpu_ids; ++cpu)
 			cpu_set(cpu, cpu_possible_map);
 		smp_ops = &psurge_smp_ops;
 	}
--- linux-2.6.orig/arch/powerpc/platforms/powermac/smp.c
+++ linux-2.6/arch/powerpc/platforms/powermac/smp.c
@@ -314,8 +314,8 @@ static int __init smp_psurge_probe(void)
 	 * device tree for them, and smp_setup_cpu_maps hasn't
 	 * set their bits in cpu_possible_map and cpu_present_map.
 	 */
-	if (ncpus > NR_CPUS)
-		ncpus = NR_CPUS;
+	if (ncpus > nr_cpu_ids)
+		ncpus = nr_cpu_ids;
 	for (i = 1; i < ncpus ; ++i) {
 		cpu_set(i, cpu_present_map);
 		set_hard_smp_processor_id(i, i);
--- linux-2.6.orig/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ linux-2.6/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -221,7 +221,7 @@ static void pseries_remove_processor(str
 			set_hard_smp_processor_id(cpu, -1);
 			break;
 		}
-		if (cpu == NR_CPUS)
+		if (cpu >= nr_cpu_ids)
 			printk(KERN_WARNING "Could not find cpu to remove "
 			       "with physical id 0x%x\n", intserv[i]);
 	}
--- linux-2.6.orig/arch/powerpc/platforms/pseries/rtasd.c
+++ linux-2.6/arch/powerpc/platforms/pseries/rtasd.c
@@ -400,7 +400,7 @@ static void do_event_scan_all_cpus(long 
 		get_online_cpus();
 
 		cpu = next_cpu(cpu, cpu_online_map);
-		if (cpu == NR_CPUS)
+		if (cpu >= nr_cpu_ids)
 			break;
 	}
 	put_online_cpus();
--- linux-2.6.orig/arch/powerpc/platforms/pseries/xics.c
+++ linux-2.6/arch/powerpc/platforms/pseries/xics.c
@@ -164,7 +164,7 @@ static int get_irq_server(unsigned int v
 
 		server = first_cpu(tmp);
 
-		if (server < NR_CPUS)
+		if (server < nr_cpu_ids)
 			return get_hard_smp_processor_id(server);
 
 		if (strict_check)
--- linux-2.6.orig/arch/powerpc/xmon/xmon.c
+++ linux-2.6/arch/powerpc/xmon/xmon.c
@@ -938,7 +938,7 @@ static int cpu_cmd(void)
 		/* print cpus waiting or in xmon */
 		printf("cpus stopped:");
 		count = 0;
-		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+		for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
 			if (cpu_isset(cpu, cpus_in_xmon)) {
 				if (count == 0)
 					printf(" %x", cpu);
@@ -950,7 +950,7 @@ static int cpu_cmd(void)
 			}
 		}
 		if (count > 1)
-			printf("-%x", NR_CPUS - 1);
+			printf("-%x", nr_cpu_ids - 1);
 		printf("\n");
 		return 0;
 	}
