x86: make TARGET_CPUS/target_cpus take a const struct cpumask *

This does the 32 bit code only.  It converts cpu_mask_to_apicid() the
same way, for simplicity.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/x86/kernel/genx2apic_uv_x.c         |    2 +-
 arch/x86/kernel/io_apic_32.c             |   31 ++++++++++++-------------------
 include/asm-x86/genapic_32.h             |    4 ++--
 include/asm-x86/mach-bigsmp/mach_apic.h  |   12 ++++++------
 include/asm-x86/mach-default/mach_apic.h |   10 +++++-----
 include/asm-x86/mach-es7000/mach_apic.h  |   16 ++++++++--------
 include/asm-x86/mach-numaq/mach_apic.h   |    6 +++---
 include/asm-x86/mach-summit/mach_apic.h  |   12 ++++++------
 8 files changed, 43 insertions(+), 50 deletions(-)

diff -r 4f690c30ff3c arch/x86/kernel/genx2apic_uv_x.c
--- a/arch/x86/kernel/genx2apic_uv_x.c	Fri Oct 03 16:50:10 2008 +1000
+++ b/arch/x86/kernel/genx2apic_uv_x.c	Fri Oct 03 17:06:21 2008 +1000
@@ -123,7 +123,7 @@ static int uv_apic_id_registered(void)
 	return 1;
 }
 
-static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
diff -r 4f690c30ff3c arch/x86/kernel/io_apic_32.c
--- a/arch/x86/kernel/io_apic_32.c	Fri Oct 03 16:50:10 2008 +1000
+++ b/arch/x86/kernel/io_apic_32.c	Fri Oct 03 17:06:21 2008 +1000
@@ -345,8 +345,8 @@ static void set_ioapic_affinity_irq(unsi
 	spin_lock_irqsave(&ioapic_lock, flags);
 	cpumask_and(&irq_desc[irq].affinity, cpumask, cpu_online_mask);
 	if (cpumask_empty(&irq_desc[irq].affinity))
-		irq_desc[irq].affinity = TARGET_CPUS;
-	apicid_value = cpu_mask_to_apicid(irq_desc[irq].affinity);
+		cpumask_copy(&irq_desc[irq].affinity, TARGET_CPUS);
+	apicid_value = cpu_mask_to_apicid(&irq_desc[irq].affinity);
 
 	/* Prepare to do the io_apic_write */
 	apicid_value = apicid_value << 24;
@@ -911,13 +911,11 @@ void __init setup_ioapic_dest(void)
 
 	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
 		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-			cpumask_t tmp;
 			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
 			if (irq_entry == -1)
 				continue;
 			irq = pin_2_irq(irq_entry, ioapic, pin);
-			tmp = TARGET_CPUS;
-			set_ioapic_affinity_irq(irq, &tmp);
+			set_ioapic_affinity_irq(irq, TARGET_CPUS);
 		}
 
 	}
@@ -2500,22 +2498,21 @@ MSI_DATA_DELIVERY_FIXED:
 }
 
 #ifdef CONFIG_SMP
-static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct msi_msg msg;
 	unsigned int dest;
-	cpumask_t tmp;
 	int vector;
-
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
-		tmp = TARGET_CPUS;
 
 	vector = assign_irq_vector(irq);
 	if (vector < 0)
 		return;
 
-	dest = cpu_mask_to_apicid(mask);
+	cpumask_and(&irq_desc[irq].affinity, mask, cpu_online_mask);
+	if (cpumask_empty(&irq_desc[irq].affinity))
+		cpumask_copy(&irq_desc[irq].affinity, TARGET_CPUS);
+
+	dest = cpu_mask_to_apicid(&irq_desc[irq].affinity);
 
 	read_msi_msg(irq, &msg);
 
@@ -2525,7 +2522,6 @@ static void set_msi_irq_affinity(unsigne
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = mask;
 }
 #endif /* CONFIG_SMP */
 
@@ -2601,9 +2597,9 @@ static void set_ht_irq_affinity(unsigned
 
 	cpumask_and(&irq_desc[irq].affinity, mask, cpu_online_mask);
 	if (cpumask_empty(&irq_desc[irq].affinity))
-		irq_desc[irq].affinity = TARGET_CPUS;
+		cpumask_copy(&irq_desc[irq].affinity, TARGET_CPUS);
 
-	dest = cpu_mask_to_apicid(irq_desc[irq].affinity);
+	dest = cpu_mask_to_apicid(&irq_desc[irq].affinity);
 
 	target_ht_irq(irq, dest);
 }
@@ -2628,11 +2624,8 @@ int arch_setup_ht_irq(unsigned int irq, 
 	if (vector >= 0) {
 		struct ht_irq_msg msg;
 		unsigned dest;
-		cpumask_t tmp;
 
-		cpus_clear(tmp);
-		cpu_set(vector >> 8, tmp);
-		dest = cpu_mask_to_apicid(tmp);
+		dest = cpu_mask_to_apicid(cpumask_of(vector >> 8));
 
 		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
diff -r 4f690c30ff3c include/asm-x86/genapic_32.h
--- a/include/asm-x86/genapic_32.h	Fri Oct 03 16:50:10 2008 +1000
+++ b/include/asm-x86/genapic_32.h	Fri Oct 03 17:06:21 2008 +1000
@@ -23,7 +23,7 @@ struct genapic {
 	int (*probe)(void);
 
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
+	const struct cpumask *(*target_cpus)(void);
 	int int_delivery_mode;
 	int int_dest_mode;
 	int ESR_DISABLE;
@@ -56,7 +56,7 @@ struct genapic {
 
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
 
 #ifdef CONFIG_SMP
 	/* ipi */
diff -r 4f690c30ff3c include/asm-x86/mach-bigsmp/mach_apic.h
--- a/include/asm-x86/mach-bigsmp/mach_apic.h	Fri Oct 03 16:50:10 2008 +1000
+++ b/include/asm-x86/mach-bigsmp/mach_apic.h	Fri Oct 03 17:06:21 2008 +1000
@@ -10,16 +10,16 @@ static inline int apic_id_registered(voi
 }
 
 /* Round robin the irqs amoung the online cpus */
-static inline cpumask_t target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 { 
 	static unsigned long cpu = NR_CPUS;
 	do {
 		if (cpu >= nr_cpu_ids)
-			cpu = first_cpu(cpu_online_map);
+			cpu = cpumask_first(cpu_online_mask);
 		else
-			cpu = next_cpu(cpu, cpu_online_map);
+			cpu = cpumask_next(cpu, cpu_online_mask);
 	} while (cpu >= nr_cpu_ids);
-	return cpumask_of_cpu(cpu);
+	return cpumask_of(cpu);
 }
 
 #undef APIC_DEST_LOGICAL
@@ -126,12 +126,12 @@ static inline int check_phys_apicid_pres
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 	int apicid;	
 
-	cpu = first_cpu(cpumask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	return apicid;
 }
diff -r 4f690c30ff3c include/asm-x86/mach-default/mach_apic.h
--- a/include/asm-x86/mach-default/mach_apic.h	Fri Oct 03 16:50:10 2008 +1000
+++ b/include/asm-x86/mach-default/mach_apic.h	Fri Oct 03 17:06:21 2008 +1000
@@ -8,12 +8,12 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_FLAT)
 
-static inline cpumask_t target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 { 
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return cpu_online_mask;
 #else
-	return cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 } 
 
@@ -57,9 +57,9 @@ static inline int apic_id_registered(voi
 	return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpus_addr(cpumask)[0];
+	return cpumask_bits(cpumask)[0];
 }
 
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
diff -r 4f690c30ff3c include/asm-x86/mach-es7000/mach_apic.h
--- a/include/asm-x86/mach-es7000/mach_apic.h	Fri Oct 03 16:50:10 2008 +1000
+++ b/include/asm-x86/mach-es7000/mach_apic.h	Fri Oct 03 17:06:21 2008 +1000
@@ -9,12 +9,12 @@ static inline int apic_id_registered(voi
 	        return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 { 
 #if defined CONFIG_ES7000_CLUSTERED_APIC
-	return CPU_MASK_ALL;
+	return cpu_all_mask;
 #else
-	return cpumask_of_cpu(smp_processor_id());
+	return cpumask_of(smp_processor_id());
 #endif
 }
 #define TARGET_CPUS	(target_cpus())
@@ -81,7 +81,7 @@ static inline void setup_apic_routing(vo
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode:  %s.  Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ? 
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
+		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpumask_bits(TARGET_CPUS)[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
@@ -145,14 +145,14 @@ static inline int check_phys_apicid_pres
 	return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;	
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 #if defined CONFIG_ES7000_CLUSTERED_APIC
@@ -164,10 +164,10 @@ static inline unsigned int cpu_mask_to_a
 	 * The cpus in the mask must all be on the apic cluster.  If are not 
 	 * on the same apicid cluster return default value of TARGET_CPUS. 
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) != 
 					apicid_cluster(new_apicid)){
diff -r 4f690c30ff3c include/asm-x86/mach-numaq/mach_apic.h
--- a/include/asm-x86/mach-numaq/mach_apic.h	Fri Oct 03 16:50:10 2008 +1000
+++ b/include/asm-x86/mach-numaq/mach_apic.h	Fri Oct 03 17:06:21 2008 +1000
@@ -7,9 +7,9 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 {
-	return CPU_MASK_ALL;
+	return cpu_all_mask;
 }
 
 #define TARGET_CPUS (target_cpus())
@@ -124,7 +124,7 @@ static inline void enable_apic_mode(void
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	return (int) 0xF;
 }
diff -r 4f690c30ff3c include/asm-x86/mach-summit/mach_apic.h
--- a/include/asm-x86/mach-summit/mach_apic.h	Fri Oct 03 16:50:10 2008 +1000
+++ b/include/asm-x86/mach-summit/mach_apic.h	Fri Oct 03 17:06:21 2008 +1000
@@ -14,13 +14,13 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 {
 	/* CPU_MASK_ALL (0xff) has undefined behaviour with
 	 * dest_LowestPrio mode logical clustered apic interrupt routing
 	 * Just start on cpu 0.  IRQ balancing will spread load
 	 */
-	return cpumask_of_cpu(0);
+	return cpumask_of(0);
 } 
 #define TARGET_CPUS	(target_cpus())
 
@@ -138,14 +138,14 @@ static inline void enable_apic_mode(void
 {
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;	
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return (int) 0xFF;
@@ -153,10 +153,10 @@ static inline unsigned int cpu_mask_to_a
 	 * The cpus in the mask must all be on the apic cluster.  If are not 
 	 * on the same apicid cluster return default value of TARGET_CPUS. 
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) != 
 					apicid_cluster(new_apicid)){
