cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits: alpha
From: Rusty Russell <rusty@rustcorp.com.au>

In fact, all cpumask operations will in general only be valid for bit
numbers below nr_cpu_ids, so use that instead of NR_CPUS as the bound
in the open-coded CPU loops here.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
---
 arch/alpha/kernel/irq.c |    3 ++-
 arch/alpha/kernel/smp.c |    8 ++++----
 2 files changed, 6 insertions(+), 5 deletions(-)

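[Commentary only, not meant to be applied: every loop touched below has
the same shape -- scan CPU numbers, skip the ones that are not online,
act on the rest.  Bounding the scan at nr_cpu_ids keeps each
cpu_online()/cpu_isset() call within the range where cpumask operations
remain defined once the rest of the series lands.  A minimal sketch of
the pattern, modelled on the smp_cpus_done() hunk (cpu_data[] is the
alpha per-CPU array used in smp.c):

        unsigned long bogosum = 0;
        int cpu;

        /* Bits at or above nr_cpu_ids are never set in a cpumask, so
         * there is no point scanning all NR_CPUS bit positions. */
        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                if (cpu_online(cpu))
                        bogosum += cpu_data[cpu].loops_per_jiffy;

These loops could arguably use for_each_online_cpu() instead, which
already stops at nr_cpu_ids, but that is a separate cleanup from the
mechanical bound change done here.]
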
--- linux-2.6.orig/arch/alpha/kernel/irq.c
+++ linux-2.6/arch/alpha/kernel/irq.c
@@ -50,8 +50,9 @@ int irq_select_affinity(unsigned int irq
 	if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
 		return 1;
 
+	/* FIXME: This has an out-by-one error: inc then test! */
 	while (!cpu_possible(cpu) || !cpu_isset(cpu, irq_default_affinity))
-		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
+		cpu = (cpu < (nr_cpu_ids-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
 	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
--- linux-2.6.orig/arch/alpha/kernel/smp.c
+++ linux-2.6/arch/alpha/kernel/smp.c
@@ -502,7 +502,7 @@ smp_cpus_done(unsigned int max_cpus)
 	int cpu;
 	unsigned long bogosum = 0;
 
-	for(cpu = 0; cpu < NR_CPUS; cpu++) 
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
 		if (cpu_online(cpu))
 			bogosum += cpu_data[cpu].loops_per_jiffy;
 	
@@ -703,7 +703,7 @@ flush_tlb_mm(struct mm_struct *mm)
 		flush_tlb_current(mm);
 		if (atomic_read(&mm->mm_users) <= 1) {
 			int cpu, this_cpu = smp_processor_id();
-			for (cpu = 0; cpu < NR_CPUS; cpu++) {
+			for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
 				if (!cpu_online(cpu) || cpu == this_cpu)
 					continue;
 				if (mm->context[cpu])
@@ -752,7 +752,7 @@ flush_tlb_page(struct vm_area_struct *vm
 		flush_tlb_current_page(mm, vma, addr);
 		if (atomic_read(&mm->mm_users) <= 1) {
 			int cpu, this_cpu = smp_processor_id();
-			for (cpu = 0; cpu < NR_CPUS; cpu++) {
+			for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
 				if (!cpu_online(cpu) || cpu == this_cpu)
 					continue;
 				if (mm->context[cpu])
@@ -808,7 +808,7 @@ flush_icache_user_range(struct vm_area_s
 		__load_new_mm_context(mm);
 		if (atomic_read(&mm->mm_users) <= 1) {
 			int cpu, this_cpu = smp_processor_id();
-			for (cpu = 0; cpu < NR_CPUS; cpu++) {
+			for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
 				if (!cpu_online(cpu) || cpu == this_cpu)
 					continue;
 				if (mm->context[cpu])
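
[A note on the FIXME added to irq.c: the search resumes one past the
remembered last_cpu, so the first cpu_possible()/cpu_isset() test can
be made with cpu == nr_cpu_ids before the wrap in the loop body ever
runs -- that is the "inc then test" out-by-one the comment refers to.
One possible shape for a follow-up fix, shown only as a sketch and not
part of this patch:

        /* Clamp before the first test so no cpumask op ever sees a bit
         * number >= nr_cpu_ids; the increment below already wraps. */
        if (cpu >= nr_cpu_ids)
                cpu = 0;
        while (!cpu_possible(cpu) || !cpu_isset(cpu, irq_default_affinity))
                cpu = (cpu + 1 < nr_cpu_ids ? cpu + 1 : 0);
        last_cpu = cpu;

Whether to fix it that way, or to switch the search to the new
cpumask_* iterators, is left for a later patch.]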
