cpumask: replace for_each_cpu_mask_nr with for_each_cpu_mask everywhere

Simple replacement; the _nr suffix is now redundant.  This also drops the
related _nr variants where they appear (next_cpu_nr, cpus_weight_nr), and
converts one tick-broadcast site to the new for_each_cpu() iterator.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/sparc64/kernel/smp.c                        |    2 +-
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c       |    6 +++---
 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c        |    6 +++---
 arch/x86/kernel/cpu/cpufreq/powernow-k8.c        |    8 ++++----
 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c |   10 +++++-----
 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c      |    4 ++--
 arch/x86/kernel/cpu/intel_cacheinfo.c            |    2 +-
 arch/x86/kernel/cpu/mcheck/mce_amd_64.c          |    4 ++--
 arch/x86/kernel/io_apic_64.c                     |    8 ++++----
 arch/x86/kernel/smpboot.c                        |    8 ++++----
 arch/x86/xen/smp.c                               |    4 ++--
 drivers/acpi/processor_throttling.c              |    6 +++---
 drivers/cpufreq/cpufreq.c                        |   14 +++++++-------
 drivers/cpufreq/cpufreq_conservative.c           |    2 +-
 drivers/cpufreq/cpufreq_ondemand.c               |    4 ++--
 drivers/infiniband/hw/ehca/ehca_irq.c            |    2 +-
 include/asm-x86/ipi.h                            |    2 +-
 kernel/cpu.c                                     |    2 +-
 kernel/rcuclassic.c                              |    2 +-
 kernel/rcupreempt.c                              |   10 +++++-----
 kernel/sched_fair.c                              |    2 +-
 kernel/sched_rt.c                                |    4 ++--
 kernel/taskstats.c                               |    4 ++--
 kernel/time/clocksource.c                        |    2 +-
 kernel/time/tick-broadcast.c                     |    7 ++++---
 mm/quicklist.c                                   |    2 +-
 net/core/dev.c                                   |    4 ++--
 net/iucv/iucv.c                                  |    2 +-
 28 files changed, 67 insertions(+), 66 deletions(-)

diff -r de7f82ecac79 arch/sparc64/kernel/smp.c
--- a/arch/sparc64/kernel/smp.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/arch/sparc64/kernel/smp.c	Thu Oct 02 16:30:46 2008 +1000
@@ -765,7 +765,7 @@ static void xcall_deliver(u64 data0, u64
 
 	/* Setup the initial cpu list.  */
 	cnt = 0;
-	for_each_cpu_mask_nr(i, *mask) {
+	for_each_cpu_mask(i, *mask) {
 		if (i == this_cpu || !cpu_online(i))
 			continue;
 		cpu_list[cnt++] = i;
diff -r de7f82ecac79 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c	Thu Oct 02 16:30:46 2008 +1000
@@ -202,7 +202,7 @@ static void drv_write(struct drv_cmd *cm
 	cpumask_t saved_mask = current->cpus_allowed;
 	unsigned int i;
 
-	for_each_cpu_mask_nr(i, cmd->mask) {
+	for_each_cpu_mask(i, cmd->mask) {
 		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 		do_drv_write(cmd);
 	}
@@ -451,7 +451,7 @@ static int acpi_cpufreq_target(struct cp
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
-	for_each_cpu_mask_nr(i, cmd.mask) {
+	for_each_cpu_mask(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -466,7 +466,7 @@ static int acpi_cpufreq_target(struct cp
 		}
 	}
 
-	for_each_cpu_mask_nr(i, cmd.mask) {
+	for_each_cpu_mask(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff -r de7f82ecac79 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c	Thu Oct 02 16:30:46 2008 +1000
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpuf
 		return 0;
 
 	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu_mask(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpuf
 	/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
 	 * Developer's Manual, Volume 3
 	 */
-	for_each_cpu_mask_nr(i, policy->cpus)
+	for_each_cpu_mask(i, policy->cpus)
 		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
 	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu_mask(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff -r de7f82ecac79 arch/x86/kernel/cpu/cpufreq/powernow-k8.c
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c	Thu Oct 02 16:30:46 2008 +1000
@@ -963,7 +963,7 @@ static int transition_frequency_fidvid(s
 	freqs.old = find_khz_freq_from_fid(data->currfid);
 	freqs.new = find_khz_freq_from_fid(fid);
 
-	for_each_cpu_mask_nr(i, *(data->available_cores)) {
+	for_each_cpu_mask(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -971,7 +971,7 @@ static int transition_frequency_fidvid(s
 	res = transition_fid_vid(data, fid, vid);
 	freqs.new = find_khz_freq_from_fid(data->currfid);
 
-	for_each_cpu_mask_nr(i, *(data->available_cores)) {
+	for_each_cpu_mask(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -994,7 +994,7 @@ static int transition_frequency_pstate(s
 	freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
 	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-	for_each_cpu_mask_nr(i, *(data->available_cores)) {
+	for_each_cpu_mask(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -1002,7 +1002,7 @@ static int transition_frequency_pstate(s
 	res = transition_pstate(data, pstate);
 	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-	for_each_cpu_mask_nr(i, *(data->available_cores)) {
+	for_each_cpu_mask(i, *(data->available_cores)) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff -r de7f82ecac79 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c	Thu Oct 02 16:30:46 2008 +1000
@@ -498,7 +498,7 @@ static int centrino_target (struct cpufr
 	}
 
 	first_cpu = 1;
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu_mask(j, policy->cpus) {
 		const cpumask_t *mask;
 
 		/* cpufreq holds the hotplug lock, so we are safe here */
@@ -544,7 +544,7 @@ static int centrino_target (struct cpufr
 			dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
 				target_freq, freqs.old, freqs.new, msr);
 
-			for_each_cpu_mask_nr(k, policy->cpus) {
+			for_each_cpu_mask(k, policy->cpus) {
 				if (!cpu_online(k))
 					continue;
 				freqs.cpu = k;
@@ -569,7 +569,7 @@ static int centrino_target (struct cpufr
 		preempt_enable();
 	}
 
-	for_each_cpu_mask_nr(k, policy->cpus) {
+	for_each_cpu_mask(k, policy->cpus) {
 		if (!cpu_online(k))
 			continue;
 		freqs.cpu = k;
@@ -584,7 +584,7 @@ static int centrino_target (struct cpufr
 		 * Best effort undo..
 		 */
 
-		for_each_cpu_mask_nr(j, *covered_cpus) {
+		for_each_cpu_mask(j, *covered_cpus) {
 			set_cpus_allowed_ptr(current, &cpumask_of_cpu(j));
 			wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
 		}
@@ -592,7 +592,7 @@ static int centrino_target (struct cpufr
 		tmp = freqs.new;
 		freqs.new = freqs.old;
 		freqs.old = tmp;
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu_mask(j, policy->cpus) {
 			if (!cpu_online(j))
 				continue;
 			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
diff -r de7f82ecac79 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c	Thu Oct 02 16:30:46 2008 +1000
@@ -279,7 +279,7 @@ static int speedstep_target (struct cpuf
 
 	cpus_allowed = current->cpus_allowed;
 
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu_mask(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -292,7 +292,7 @@ static int speedstep_target (struct cpuf
 	/* allow to be run on all CPUs */
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu_mask(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
diff -r de7f82ecac79 arch/x86/kernel/cpu/intel_cacheinfo.c
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c	Thu Oct 02 16:30:46 2008 +1000
@@ -489,7 +489,7 @@ static void __cpuinit cache_remove_share
 	int sibling;
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);	
 		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
 	}
diff -r de7f82ecac79 arch/x86/kernel/cpu/mcheck/mce_amd_64.c
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c	Thu Oct 02 16:30:46 2008 +1000
@@ -527,7 +527,7 @@ static __cpuinit int threshold_create_ba
 	if (err)
 		goto out_free;
 
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu_mask(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -617,7 +617,7 @@ static void threshold_remove_bank(unsign
 #endif
 
 	/* remove all sibling symlinks before unregistering */
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu_mask(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
diff -r de7f82ecac79 arch/x86/kernel/io_apic_64.c
--- a/arch/x86/kernel/io_apic_64.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/arch/x86/kernel/io_apic_64.c	Thu Oct 02 16:30:46 2008 +1000
@@ -745,7 +745,7 @@ static int __assign_irq_vector(int irq, 
 			return 0;
 	}
 
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu_mask(cpu, mask) {
 		cpumask_t domain, new_mask;
 		int new_cpu;
 		int vector, offset;
@@ -766,7 +766,7 @@ next:
 			continue;
 		if (vector == IA32_SYSCALL_VECTOR)
 			goto next;
-		for_each_cpu_mask_nr(new_cpu, new_mask)
+		for_each_cpu_mask(new_cpu, new_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
@@ -776,7 +776,7 @@ next:
 			cfg->move_in_progress = 1;
 			cfg->old_domain = cfg->domain;
 		}
-		for_each_cpu_mask_nr(new_cpu, new_mask)
+		for_each_cpu_mask(new_cpu, new_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
 		cfg->domain = domain;
@@ -808,7 +808,7 @@ static void __clear_irq_vector(int irq)
 
 	vector = cfg->vector;
 	cpus_and(mask, cfg->domain, cpu_online_map);
-	for_each_cpu_mask_nr(cpu, mask)
+	for_each_cpu_mask(cpu, mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
diff -r de7f82ecac79 arch/x86/kernel/smpboot.c
--- a/arch/x86/kernel/smpboot.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/arch/x86/kernel/smpboot.c	Thu Oct 02 16:30:46 2008 +1000
@@ -436,7 +436,7 @@ void __cpuinit set_cpu_sibling_map(int c
 	cpu_set(cpu, cpu_sibling_setup_map);
 
 	if (smp_num_siblings > 1) {
-		for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
+		for_each_cpu_mask(i, cpu_sibling_setup_map) {
 			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
 			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
 				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
@@ -459,7 +459,7 @@ void __cpuinit set_cpu_sibling_map(int c
 		return;
 	}
 
-	for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
+	for_each_cpu_mask(i, cpu_sibling_setup_map) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
 			cpu_set(i, c->llc_shared_map);
@@ -1258,7 +1258,7 @@ static void remove_siblinginfo(int cpu)
 	int sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
+	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
 		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
 		/*/
 		 * last thread sibling in this cpu core going down
@@ -1267,7 +1267,7 @@ static void remove_siblinginfo(int cpu)
 			cpu_data(sibling).booted_cores--;
 	}
 
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
+	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
 		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
 	cpus_clear(per_cpu(cpu_sibling_map, cpu));
 	cpus_clear(per_cpu(cpu_core_map, cpu));
diff -r de7f82ecac79 arch/x86/xen/smp.c
--- a/arch/x86/xen/smp.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/arch/x86/xen/smp.c	Thu Oct 02 16:30:46 2008 +1000
@@ -367,7 +367,7 @@ static void xen_send_IPI_mask(cpumask_t 
 
 	cpus_and(mask, mask, cpu_online_map);
 
-	for_each_cpu_mask_nr(cpu, mask)
+	for_each_cpu_mask(cpu, mask)
 		xen_send_IPI_one(cpu, vector);
 }
 
@@ -378,7 +378,7 @@ static void xen_smp_send_call_function_i
 	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
 	/* Make sure other vcpus get a chance to run if they need to. */
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu_mask(cpu, mask) {
 		if (xen_vcpu_stolen(cpu)) {
 			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 			break;
diff -r de7f82ecac79 drivers/acpi/processor_throttling.c
--- a/drivers/acpi/processor_throttling.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/drivers/acpi/processor_throttling.c	Thu Oct 02 16:30:46 2008 +1000
@@ -1013,7 +1013,7 @@ int acpi_processor_set_throttling(struct
 	 * affected cpu in order to get one proper T-state.
 	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	for_each_cpu_mask_nr(i, online_throttling_cpus) {
+	for_each_cpu_mask(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
 							&t_state);
@@ -1034,7 +1034,7 @@ int acpi_processor_set_throttling(struct
 		 * it is necessary to set T-state for every affected
 		 * cpus.
 		 */
-		for_each_cpu_mask_nr(i, online_throttling_cpus) {
+		for_each_cpu_mask(i, online_throttling_cpus) {
 			match_pr = per_cpu(processors, i);
 			/*
 			 * If the pointer is invalid, we will report the
@@ -1068,7 +1068,7 @@ int acpi_processor_set_throttling(struct
 	 * affected cpu to update the T-states.
 	 * The notifier event is THROTTLING_POSTCHANGE
 	 */
-	for_each_cpu_mask_nr(i, online_throttling_cpus) {
+	for_each_cpu_mask(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
 							&t_state);
diff -r de7f82ecac79 drivers/cpufreq/cpufreq.c
--- a/drivers/cpufreq/cpufreq.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/drivers/cpufreq/cpufreq.c	Thu Oct 02 16:30:46 2008 +1000
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask,
 	ssize_t i = 0;
 	unsigned int cpu;
 
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu_mask(cpu, mask) {
 		if (i)
 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -838,7 +838,7 @@ static int cpufreq_add_dev(struct sys_de
 	}
 #endif
 
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu_mask(j, policy->cpus) {
 		if (cpu == j)
 			continue;
 
@@ -901,14 +901,14 @@ static int cpufreq_add_dev(struct sys_de
 	}
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu_mask(j, policy->cpus) {
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	/* symlink affected CPUs */
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu_mask(j, policy->cpus) {
 		if (j == cpu)
 			continue;
 		if (!cpu_online(j))
@@ -948,7 +948,7 @@ static int cpufreq_add_dev(struct sys_de
 
 err_out_unregister:
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask_nr(j, policy->cpus)
+	for_each_cpu_mask(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -1031,7 +1031,7 @@ static int __cpufreq_remove_dev(struct s
 	 * the sysfs links afterwards.
 	 */
 	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask_nr(j, data->cpus) {
+		for_each_cpu_mask(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			per_cpu(cpufreq_cpu_data, j) = NULL;
@@ -1041,7 +1041,7 @@ static int __cpufreq_remove_dev(struct s
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask_nr(j, data->cpus) {
+		for_each_cpu_mask(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			dprintk("removing link for cpu %u\n", j);
diff -r de7f82ecac79 drivers/cpufreq/cpufreq_conservative.c
--- a/drivers/cpufreq/cpufreq_conservative.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/drivers/cpufreq/cpufreq_conservative.c	Thu Oct 02 16:30:46 2008 +1000
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct c
 			return rc;
 		}
 
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff -r de7f82ecac79 drivers/cpufreq/cpufreq_ondemand.c
--- a/drivers/cpufreq/cpufreq_ondemand.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/drivers/cpufreq/cpufreq_ondemand.c	Thu Oct 02 16:30:46 2008 +1000
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs
 
 	/* Get Idle Time */
 	idle_ticks = UINT_MAX;
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu_mask(j, policy->cpus) {
 		cputime64_t total_idle_ticks;
 		unsigned int tmp_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct c
 			return rc;
 		}
 
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff -r de7f82ecac79 drivers/infiniband/hw/ehca/ehca_irq.c
--- a/drivers/infiniband/hw/ehca/ehca_irq.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c	Thu Oct 02 16:30:46 2008 +1000
@@ -650,7 +650,7 @@ static inline int find_next_online_cpu(s
 		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
 
 	spin_lock_irqsave(&pool->last_cpu_lock, flags);
-	cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+	cpu = next_cpu(pool->last_cpu, cpu_online_map);
 	if (cpu >= nr_cpu_ids)
 		cpu = first_cpu(cpu_online_map);
 	pool->last_cpu = cpu;
diff -r de7f82ecac79 include/asm-x86/ipi.h
--- a/include/asm-x86/ipi.h	Thu Oct 02 16:11:51 2008 +1000
+++ b/include/asm-x86/ipi.h	Thu Oct 02 16:30:46 2008 +1000
@@ -122,7 +122,7 @@ static inline void send_IPI_mask_sequenc
 	 * - mbligh
 	 */
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, mask) {
+	for_each_cpu_mask(query_cpu, mask) {
 		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
 				      vector, APIC_DEST_PHYSICAL);
 	}
diff -r de7f82ecac79 kernel/cpu.c
--- a/kernel/cpu.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/kernel/cpu.c	Thu Oct 02 16:30:47 2008 +1000
@@ -416,7 +416,7 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
-	for_each_cpu_mask_nr(cpu, frozen_cpus) {
+	for_each_cpu_mask(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
 			printk("CPU%d is up\n", cpu);
diff -r de7f82ecac79 kernel/rcuclassic.c
--- a/kernel/rcuclassic.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/kernel/rcuclassic.c	Thu Oct 02 16:30:47 2008 +1000
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct
 		 */
 		cpus_and(cpumask, rcp->cpumask, cpu_online_map);
 		cpu_clear(rdp->cpu, cpumask);
-		for_each_cpu_mask_nr(cpu, cpumask)
+		for_each_cpu_mask(cpu, cpumask)
 			smp_send_reschedule(cpu);
 	}
 }
diff -r de7f82ecac79 kernel/rcupreempt.c
--- a/kernel/rcupreempt.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/kernel/rcupreempt.c	Thu Oct 02 16:30:47 2008 +1000
@@ -756,7 +756,7 @@ rcu_try_flip_idle(void)
 
 	/* Now ask each CPU for acknowledgement of the flip. */
 
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -774,7 +774,7 @@ rcu_try_flip_waitack(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitack_needed(cpu) &&
 		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -806,7 +806,7 @@ rcu_try_flip_waitzero(void)
 	/* Check to see if the sum of the "last" counters is zero. */
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask(cpu, rcu_cpu_online_map)
 		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
 	if (sum != 0) {
 		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -821,7 +821,7 @@ rcu_try_flip_waitzero(void)
 	smp_mb();  /*  ^^^^^^^^^^^^ */
 
 	/* Call for a memory barrier from each CPU. */
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -841,7 +841,7 @@ rcu_try_flip_waitmb(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitmb_needed(cpu) &&
 		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
diff -r de7f82ecac79 kernel/sched_fair.c
--- a/kernel/sched_fair.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/kernel/sched_fair.c	Thu Oct 02 16:30:47 2008 +1000
@@ -1033,7 +1033,7 @@ static int wake_idle(int cpu, struct tas
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
 			cpus_and(tmp, tmp, cpu_active_map);
-			for_each_cpu_mask_nr(i, tmp) {
+			for_each_cpu_mask(i, tmp) {
 				if (idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
diff -r de7f82ecac79 kernel/sched_rt.c
--- a/kernel/sched_rt.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/kernel/sched_rt.c	Thu Oct 02 16:30:47 2008 +1000
@@ -242,7 +242,7 @@ static int do_balance_runtime(struct rt_
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask_nr(i, rd->span) {
+	for_each_cpu_mask(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -1132,7 +1132,7 @@ static int pull_rt_task(struct rq *this_
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
diff -r de7f82ecac79 kernel/taskstats.c
--- a/kernel/taskstats.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/kernel/taskstats.c	Thu Oct 02 16:30:47 2008 +1000
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, c
 		return -EINVAL;
 
 	if (isadd == REGISTER) {
-		for_each_cpu_mask_nr(cpu, mask) {
+		for_each_cpu_mask(cpu, mask) {
 			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
 					 cpu_to_node(cpu));
 			if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, c
 
 	/* Deregister or cleanup */
 cleanup:
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu_mask(cpu, mask) {
 		listeners = &per_cpu(listener_array, cpu);
 		down_write(&listeners->sem);
 		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
diff -r de7f82ecac79 kernel/time/clocksource.c
--- a/kernel/time/clocksource.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/kernel/time/clocksource.c	Thu Oct 02 16:30:47 2008 +1000
@@ -145,7 +145,7 @@ static void clocksource_watchdog(unsigne
 		 * Cycle through CPUs to check if the CPUs stay
 		 * synchronized to each other.
 		 */
-		int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);
+		int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
 
 		if (next_cpu >= nr_cpu_ids)
 			next_cpu = first_cpu(cpu_online_map);
diff -r de7f82ecac79 kernel/time/tick-broadcast.c
--- a/kernel/time/tick-broadcast.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/kernel/time/tick-broadcast.c	Thu Oct 02 16:30:47 2008 +1000
@@ -398,7 +398,7 @@ again:
 	mask = CPU_MASK_NONE;
 	now = ktime_get();
 	/* Find all expired events */
-	for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
+	for_each_cpu_mask(cpu, tick_broadcast_oneshot_mask) {
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event.tv64 <= now.tv64)
 			cpu_set(cpu, mask);
@@ -490,12 +490,13 @@ static void tick_broadcast_clear_oneshot
 	cpu_clear(cpu, tick_broadcast_oneshot_mask);
 }
 
-static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires)
+static void tick_broadcast_init_next_event(const struct cpumask *mask,
+					   ktime_t expires)
 {
 	struct tick_device *td;
 	int cpu;
 
-	for_each_cpu_mask_nr(cpu, *mask) {
+	for_each_cpu(cpu, mask) {
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev)
 			td->evtdev->next_event = expires;
diff -r de7f82ecac79 mm/quicklist.c
--- a/mm/quicklist.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/mm/quicklist.c	Thu Oct 02 16:30:47 2008 +1000
@@ -42,7 +42,7 @@ static unsigned long max_pages(unsigned 
 
 	max = node_free_pages / FRACTION_OF_NODE_MEM;
 
-	num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
+	num_cpus_on_node = cpus_weight(*cpumask_on_node);
 	max /= num_cpus_on_node;
 
 	return max(max, min_pages);
diff -r de7f82ecac79 net/core/dev.c
--- a/net/core/dev.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/net/core/dev.c	Thu Oct 02 16:30:47 2008 +1000
@@ -2410,7 +2410,7 @@ out:
 	 */
 	if (!cpus_empty(net_dma.channel_mask)) {
 		int chan_idx;
-		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
+		for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
 			struct dma_chan *chan = net_dma.channels[chan_idx];
 			if (chan)
 				dma_async_memcpy_issue_pending(chan);
@@ -4552,7 +4552,7 @@ static void net_dma_rebalance(struct net
 	i = 0;
 	cpu = first_cpu(cpu_online_map);
 
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
+	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
 		chan = net_dma->channels[chan_idx];
 
 		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
diff -r de7f82ecac79 net/iucv/iucv.c
--- a/net/iucv/iucv.c	Thu Oct 02 16:11:51 2008 +1000
+++ b/net/iucv/iucv.c	Thu Oct 02 16:30:47 2008 +1000
@@ -497,7 +497,7 @@ static void iucv_setmask_up(void)
 	/* Disable all cpu but the first in cpu_irq_cpumask. */
 	cpumask = iucv_irq_cpumask;
 	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
-	for_each_cpu_mask_nr(cpu, cpumask)
+	for_each_cpu_mask(cpu, cpumask)
 		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
