x86: cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits.
From: Rusty Russell <rusty@rustcorp.com.au>

In general, cpumask operations are only valid for bit numbers
< nr_cpu_ids.  So use nr_cpu_ids instead of NR_CPUS in iterators
and other comparisons.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
---
 arch/x86/kernel/apic.c              |    2 +-
 arch/x86/kernel/irq_32.c            |    2 +-
 arch/x86/mach-voyager/voyager_smp.c |    2 +-
 arch/x86/mm/numa_64.c               |    4 ++--
 arch/x86/mm/srat_64.c               |    2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)

diff -r bb80787e9617 arch/x86/kernel/apic.c
--- a/arch/x86/kernel/apic.c	Fri Oct 24 00:27:01 2008 +1100
+++ b/arch/x86/kernel/apic.c	Fri Oct 24 00:28:25 2008 +1100
@@ -2106,7 +2106,7 @@ __cpuinit int apic_is_clustered_box(void
 	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		/* are we being called early in kernel startup? */
 		if (bios_cpu_apicid) {
 			id = bios_cpu_apicid[i];
diff -r bb80787e9617 arch/x86/kernel/irq_32.c
--- a/arch/x86/kernel/irq_32.c	Fri Oct 24 00:27:01 2008 +1100
+++ b/arch/x86/kernel/irq_32.c	Fri Oct 24 00:28:25 2008 +1100
@@ -246,7 +246,7 @@ void fixup_irqs(cpumask_t map)
 			continue;
 
 		cpus_and(mask, desc->affinity, map);
-		if (any_online_cpu(mask) == NR_CPUS) {
+		if (any_online_cpu(mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
 		}
diff -r bb80787e9617 arch/x86/mach-voyager/voyager_smp.c
--- a/arch/x86/mach-voyager/voyager_smp.c	Fri Oct 24 00:27:01 2008 +1100
+++ b/arch/x86/mach-voyager/voyager_smp.c	Fri Oct 24 00:28:25 2008 +1100
@@ -661,7 +661,7 @@ void __init smp_boot_cpus(void)
 
 	/* loop over all the extended VIC CPUs and boot them.  The
 	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
 			continue;
 		do_boot_cpu(i);
diff -r bb80787e9617 arch/x86/mm/numa_64.c
--- a/arch/x86/mm/numa_64.c	Fri Oct 24 00:27:01 2008 +1100
+++ b/arch/x86/mm/numa_64.c	Fri Oct 24 00:28:25 2008 +1100
@@ -278,7 +278,7 @@ void __init numa_init_array(void)
 	int rr, i;
 
 	rr = first_node(node_online_map);
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		if (early_cpu_to_node(i) != NUMA_NO_NODE)
 			continue;
 		numa_set_node(i, rr);
@@ -549,7 +549,7 @@ void __init initmem_init(unsigned long s
 	memnodemap[0] = 0;
 	node_set_online(0);
 	node_set(0, node_possible_map);
-	for (i = 0; i < NR_CPUS; i++)
+	for (i = 0; i < nr_cpu_ids; i++)
 		numa_set_node(i, 0);
 	e820_register_active_regions(0, start_pfn, last_pfn);
 	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
diff -r bb80787e9617 arch/x86/mm/srat_64.c
--- a/arch/x86/mm/srat_64.c	Fri Oct 24 00:27:01 2008 +1100
+++ b/arch/x86/mm/srat_64.c	Fri Oct 24 00:28:25 2008 +1100
@@ -382,7 +382,7 @@ int __init acpi_scan_nodes(unsigned long
 		if (!node_online(i))
 			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		int node = early_cpu_to_node(i);
 
 		if (node == NUMA_NO_NODE)
