x86: make TARGET_CPUS/target_cpus take a const struct cpumask

Convert genapic functions to take struct cpumask * pointers.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
---
 arch/x86/include/asm/bigsmp/ipi.h             |   18 ++++++----
 arch/x86/include/asm/es7000/ipi.h             |   19 ++++++-----
 arch/x86/include/asm/genapic_32.h             |    9 +++--
 arch/x86/include/asm/genapic_64.h             |    9 +++--
 arch/x86/include/asm/ipi.h                    |    6 ++-
 arch/x86/include/asm/mach-default/mach_apic.h |   10 +++---
 arch/x86/include/asm/mach-default/mach_ipi.h  |    6 +--
 arch/x86/include/asm/numaq/ipi.h              |   18 ++++++----
 arch/x86/include/asm/smp.h                    |    8 ++--
 arch/x86/kernel/apic.c                        |    2 -
 arch/x86/kernel/genapic_flat_64.c             |   43 +++++++++++++-------------
 arch/x86/kernel/genx2apic_cluster.c           |   16 ++++-----
 arch/x86/kernel/genx2apic_phys.c              |    6 +--
 arch/x86/kernel/genx2apic_uv_x.c              |   12 +++----
 arch/x86/kernel/io_apic.c                     |    2 -
 arch/x86/kernel/ipi.c                         |   10 +++---
 arch/x86/kernel/smp.c                         |    6 +--
 arch/x86/kernel/tlb_64.c                      |    3 +
 arch/x86/xen/smp.c                            |   20 +++++++-----
 arch/x86/xen/suspend.c                        |    3 +
 arch/x86/xen/xen-ops.h                        |    2 -
 kernel/smp.c                                  |    2 -
 22 files changed, 124 insertions(+), 106 deletions(-)

diff -r 2380271affb1 arch/x86/include/asm/bigsmp/ipi.h
--- a/arch/x86/include/asm/bigsmp/ipi.h	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/include/asm/bigsmp/ipi.h	Tue Nov 18 20:53:45 2008 +1030
@@ -1,26 +1,28 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
+	DECLARE_BITMAP(mask, NR_CPUS);
 
-	if (!cpus_empty(mask))
-		send_IPI_mask(&mask, vector);
+	cpumask_copy(to_cpumask(mask), cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), mask);
+
+	if (!cpumask_empty(mask))
+		send_IPI_mask(mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(&cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_MACH_IPI_H */
diff -r 2380271affb1 arch/x86/include/asm/es7000/ipi.h
--- a/arch/x86/include/asm/es7000/ipi.h	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/include/asm/es7000/ipi.h	Tue Nov 18 20:53:45 2008 +1030
@@ -1,25 +1,28 @@
 #ifndef __ASM_ES7000_IPI_H
 #define __ASM_ES7000_IPI_H
 
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(&mask, vector);
+	DECLARE_BITMAP(mask, NR_CPUS);
+
+	cpumask_copy(to_cpumask(mask), cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), mask);
+
+	if (!cpumask_empty(mask))
+		send_IPI_mask(mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(&cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_ES7000_IPI_H */
diff -r 2380271affb1 arch/x86/include/asm/genapic_32.h
--- a/arch/x86/include/asm/genapic_32.h	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/include/asm/genapic_32.h	Tue Nov 18 20:53:45 2008 +1030
@@ -23,7 +23,7 @@
 	int (*probe)(void);
 
 	int (*apic_id_registered)(void);
-	const cpumask_t *(*target_cpus)(void);
+	const struct cpumask *(*target_cpus)(void);
 	int int_delivery_mode;
 	int int_dest_mode;
 	int ESR_DISABLE;
@@ -56,15 +56,16 @@
 
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
-	unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
 	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
 					       const struct cpumask *andmask);
 	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 
 #ifdef CONFIG_SMP
 	/* ipi */
-	void (*send_IPI_mask)(const cpumask_t *mask, int vector);
-	void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 #endif
diff -r 2380271affb1 arch/x86/include/asm/genapic_64.h
--- a/arch/x86/include/asm/genapic_64.h	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/include/asm/genapic_64.h	Tue Nov 18 20:53:45 2008 +1030
@@ -20,17 +20,18 @@
 	u32 int_delivery_mode;
 	u32 int_dest_mode;
 	int (*apic_id_registered)(void);
-	const cpumask_t *(*target_cpus)(void);
+	const struct cpumask *(*target_cpus)(void);
 	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 	/* ipi */
-	void (*send_IPI_mask)(const cpumask_t *mask, int vector);
-	void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 	void (*send_IPI_self)(int vector);
 	/* */
-	unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
 	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
 					       const struct cpumask *andmask);
 	unsigned int (*phys_pkg_id)(int index_msb);
diff -r 2380271affb1 arch/x86/include/asm/ipi.h
--- a/arch/x86/include/asm/ipi.h	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/include/asm/ipi.h	Tue Nov 18 20:53:45 2008 +1030
@@ -117,7 +117,8 @@
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask_sequence(const struct cpumask *mask,
+					  int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
@@ -135,7 +136,8 @@
 	local_irq_restore(flags);
 }
 
-static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
 {
 	unsigned long flags;
 	unsigned int query_cpu;
diff -r 2380271affb1 arch/x86/include/asm/mach-default/mach_apic.h
--- a/arch/x86/include/asm/mach-default/mach_apic.h	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/include/asm/mach-default/mach_apic.h	Tue Nov 18 20:53:45 2008 +1030
@@ -8,12 +8,12 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_FLAT)
 
-static inline const cpumask_t *target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 { 
 #ifdef CONFIG_SMP
-	return &cpu_online_map;
+	return cpu_online_mask;
 #else
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 } 
 
@@ -60,9 +60,9 @@
 	return physid_isset(read_apic_id(), phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpus_addr(*cpumask)[0];
+	return cpumask_bits(cpumask)[0];
 }
 
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
diff -r 2380271affb1 arch/x86/include/asm/mach-default/mach_ipi.h
--- a/arch/x86/include/asm/mach-default/mach_ipi.h	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/include/asm/mach-default/mach_ipi.h	Tue Nov 18 20:53:45 2008 +1030
@@ -4,8 +4,8 @@
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
-void send_IPI_mask_bitmask(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
 extern int no_broadcast;
@@ -15,7 +15,7 @@
 #define send_IPI_mask (genapic->send_IPI_mask)
 #define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
 #else
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
diff -r 2380271affb1 arch/x86/include/asm/numaq/ipi.h
--- a/arch/x86/include/asm/numaq/ipi.h	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/include/asm/numaq/ipi.h	Tue Nov 18 20:53:45 2008 +1030
@@ -1,26 +1,28 @@
 #ifndef __ASM_NUMAQ_IPI_H
 #define __ASM_NUMAQ_IPI_H
 
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
+	DECLARE_BITMAP(mask, NR_CPUS);
 
-	if (!cpus_empty(mask))
-		send_IPI_mask(&mask, vector);
+	cpumask_copy(to_cpumask(mask), cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), mask);
+
+	if (!cpumask_empty(mask))
+		send_IPI_mask(mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(&cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_NUMAQ_IPI_H */
diff -r 2380271affb1 arch/x86/include/asm/smp.h
--- a/arch/x86/include/asm/smp.h	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/include/asm/smp.h	Tue Nov 18 20:53:45 2008 +1030
@@ -60,7 +60,7 @@
 	void (*cpu_die)(unsigned int cpu);
 	void (*play_dead)(void);
 
-	void (*send_call_func_ipi)(const cpumask_t *mask);
+	void (*send_call_func_ipi)(const struct cpumask *mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
 
@@ -123,9 +123,9 @@
 	smp_ops.send_call_func_single_ipi(cpu);
 }
 
-static inline void arch_send_call_function_ipi(cpumask_t mask)
+static inline void arch_send_call_function_ipi(const struct cpumask *mask)
 {
-	smp_ops.send_call_func_ipi(&mask);
+	smp_ops.send_call_func_ipi(mask);
 }
 
 void cpu_disable_common(void);
@@ -138,7 +138,7 @@
 void native_play_dead(void);
 void play_dead_common(void);
 
-void native_send_call_func_ipi(const cpumask_t *mask);
+void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
 extern void prefill_possible_map(void);
diff -r 2380271affb1 arch/x86/kernel/apic.c
--- a/arch/x86/kernel/apic.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/kernel/apic.c	Tue Nov 18 20:53:45 2008 +1030
@@ -456,7 +456,7 @@
 static void lapic_timer_broadcast(cpumask_t mask)
 {
 #ifdef CONFIG_SMP
-	send_IPI_mask(&mask, LOCAL_TIMER_VECTOR);
+	send_IPI_mask(to_cpumask(cpus_addr(mask)), LOCAL_TIMER_VECTOR);
 #endif
 }
 
diff -r 2380271affb1 arch/x86/kernel/genapic_flat_64.c
--- a/arch/x86/kernel/genapic_flat_64.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/kernel/genapic_flat_64.c	Tue Nov 18 20:53:45 2008 +1030
@@ -30,12 +30,12 @@
 	return 1;
 }
 
-static const cpumask_t *flat_target_cpus(void)
+static const struct cpumask *flat_target_cpus(void)
 {
-	return &cpu_online_map;
+	return cpu_online_mask;
 }
 
-static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -45,7 +45,8 @@
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	*retmask = (cpumask_t) { {[0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 /*
@@ -77,9 +78,9 @@
 	local_irq_restore(flags);
 }
 
-static void flat_send_IPI_mask(const cpumask_t *cpumask, int vector)
+static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
-	unsigned long mask = cpus_addr(*cpumask)[0];
+	unsigned long mask = cpumask_bits(cpumask)[0];
 
 	_flat_send_IPI_mask(mask, vector);
 }
@@ -104,7 +105,7 @@
 #endif
 	if (hotplug || vector == NMI_VECTOR) {
 		if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) {
-			unsigned long mask = cpus_addr(cpu_online_map)[0];
+			unsigned long mask = cpumask_bits(cpu_online_mask)[0];
 
 			if (cpu < BITS_PER_LONG)
 				clear_bit(cpu, &mask);
@@ -119,7 +120,7 @@
 static void flat_send_IPI_all(int vector)
 {
 	if (vector == NMI_VECTOR)
-		flat_send_IPI_mask(&cpu_online_map, vector);
+		flat_send_IPI_mask(cpu_online_mask, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
 }
@@ -153,9 +154,9 @@
 	return physid_isset(read_xapic_id(), phys_cpu_present_map);
 }
 
-static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
+	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
 }
 
 static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -217,23 +218,23 @@
 	return 0;
 }
 
-static const cpumask_t *physflat_target_cpus(void)
+static const struct cpumask *physflat_target_cpus(void)
 {
-	return &cpu_online_map;
+	return cpu_online_mask;
 }
 
-static void physflat_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
-static void physflat_send_IPI_mask(const cpumask_t *cpumask, int vector)
+static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
 	send_IPI_mask_sequence(cpumask, vector);
 }
 
-static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask,
+static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
 					      int vector)
 {
 	send_IPI_mask_allbutself(cpumask, vector);
@@ -241,15 +242,15 @@
 
 static void physflat_send_IPI_allbutself(int vector)
 {
-	send_IPI_mask_allbutself(&cpu_online_map, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static void physflat_send_IPI_all(int vector)
 {
-	physflat_send_IPI_mask(&cpu_online_map, vector);
+	physflat_send_IPI_mask(cpu_online_mask, vector);
 }
 
-static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
@@ -257,7 +258,7 @@
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(*cpumask);
+	cpu = cpumask_first(cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
diff -r 2380271affb1 arch/x86/kernel/genx2apic_cluster.c
--- a/arch/x86/kernel/genx2apic_cluster.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/kernel/genx2apic_cluster.c	Tue Nov 18 20:53:45 2008 +1030
@@ -22,18 +22,18 @@
 
 /* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
 
-static const cpumask_t *x2apic_target_cpus(void)
+static const struct cpumask *x2apic_target_cpus(void)
 {
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
 /*
  * for now each logical cpu is in its own vector allocation domain.
  */
-static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -55,7 +55,7 @@
  * at once. We have 16 cpu's in a cluster. This will minimize IPI register
  * writes.
  */
-static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
@@ -100,7 +100,7 @@
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(&cpu_online_map, vector);
+	x2apic_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -108,7 +108,7 @@
 	return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
diff -r 2380271affb1 arch/x86/kernel/genx2apic_phys.c
--- a/arch/x86/kernel/genx2apic_phys.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/kernel/genx2apic_phys.c	Tue Nov 18 20:53:45 2008 +1030
@@ -53,7 +53,7 @@
 	x2apic_icr_write(cfg, apicid);
 }
 
-static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
@@ -99,7 +99,7 @@
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(&cpu_online_map, vector);
+	x2apic_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -107,7 +107,7 @@
 	return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
diff -r 2380271affb1 arch/x86/kernel/genx2apic_uv_x.c
--- a/arch/x86/kernel/genx2apic_uv_x.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/kernel/genx2apic_uv_x.c	Tue Nov 18 20:53:45 2008 +1030
@@ -75,15 +75,15 @@
 
 /* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
 
-static const cpumask_t *uv_target_cpus(void)
+static const struct cpumask *uv_target_cpus(void)
 {
 	return &cpumask_of_cpu(0);
 }
 
-static void uv_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
@@ -122,7 +122,7 @@
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
-static void uv_send_IPI_mask(const cpumask_t *mask, int vector)
+static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned int cpu;
 
@@ -164,7 +164,7 @@
 {
 }
 
-static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
diff -r 2380271affb1 arch/x86/kernel/io_apic.c
--- a/arch/x86/kernel/io_apic.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/kernel/io_apic.c	Tue Nov 18 20:53:45 2008 +1030
@@ -2031,7 +2031,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	send_IPI_mask(&cpumask_of_cpu(cpumask_first(to_cpumask(cfg->domain))),
+	send_IPI_mask(cpumask_of(cpumask_first(to_cpumask(cfg->domain))),
 			cfg->vector);
 	spin_unlock_irqrestore(&vector_lock, flags);
 
diff -r 2380271affb1 arch/x86/kernel/ipi.c
--- a/arch/x86/kernel/ipi.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/kernel/ipi.c	Tue Nov 18 20:53:45 2008 +1030
@@ -116,18 +116,18 @@
 /*
  * This is only used on smaller machines.
  */
-void send_IPI_mask_bitmask(const cpumask_t *cpumask, int vector)
+void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
 {
-	unsigned long mask = cpus_addr(*cpumask)[0];
+	unsigned long mask = cpumask_bits(cpumask)[0];
 	unsigned long flags;
 
 	local_irq_save(flags);
-	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
+	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
 	__send_IPI_dest_field(mask, vector);
 	local_irq_restore(flags);
 }
 
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned int query_cpu;
@@ -144,7 +144,7 @@
 	local_irq_restore(flags);
 }
 
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned int query_cpu;
diff -r 2380271affb1 arch/x86/kernel/smp.c
--- a/arch/x86/kernel/smp.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/kernel/smp.c	Tue Nov 18 20:53:45 2008 +1030
@@ -118,15 +118,15 @@
 		WARN_ON(1);
 		return;
 	}
-	send_IPI_mask(&cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
-	send_IPI_mask(&cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-void native_send_call_func_ipi(const cpumask_t *mask)
+void native_send_call_func_ipi(const struct cpumask *mask)
 {
 	cpumask_t allbutself;
 
diff -r 2380271affb1 arch/x86/kernel/tlb_64.c
--- a/arch/x86/kernel/tlb_64.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/kernel/tlb_64.c	Tue Nov 18 20:53:45 2008 +1030
@@ -191,7 +191,8 @@
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
+	send_IPI_mask(to_cpumask(cpus_addr(cpumask)),
+		      INVALIDATE_TLB_VECTOR_START + sender);
 
 	while (!cpus_empty(f->flush_cpumask))
 		cpu_relax();
diff -r 2380271affb1 arch/x86/xen/smp.c
--- a/arch/x86/xen/smp.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/xen/smp.c	Tue Nov 18 20:53:45 2008 +1030
@@ -33,7 +33,7 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-cpumask_t xen_cpu_initialized_map;
+cpumask_var_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
 static DEFINE_PER_CPU(int, callfunc_irq);
@@ -192,7 +192,10 @@
 	if (xen_smp_intr_init(0))
 		BUG();
 
-	xen_cpu_initialized_map = cpumask_of_cpu(0);
+	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
+		panic("could not allocate xen_cpu_initialized_map\n");
+
+	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
 
 	/* Restrict the possible_map according to max_cpus. */
 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
@@ -221,7 +224,7 @@
 	struct vcpu_guest_context *ctxt;
 	struct desc_struct *gdt;
 
-	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
+	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
 		return 0;
 
 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
@@ -408,22 +411,23 @@
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-static void xen_send_IPI_mask(const cpumask_t *mask, enum ipi_vector vector)
+static void xen_send_IPI_mask(const struct cpumask *mask,
+			      enum ipi_vector vector)
 {
 	unsigned cpu;
 
-	for_each_cpu_mask_and(cpu, *mask, cpu_online_map)
+	for_each_cpu_and(cpu, mask, cpu_online_mask)
 		xen_send_IPI_one(cpu, vector);
 }
 
-static void xen_smp_send_call_function_ipi(const cpumask_t *mask)
+static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 {
 	int cpu;
 
 	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
 	/* Make sure other vcpus get a chance to run if they need to. */
-	for_each_cpu_mask_nr(cpu, *mask) {
+	for_each_cpu(cpu, mask) {
 		if (xen_vcpu_stolen(cpu)) {
 			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 			break;
@@ -433,7 +437,7 @@
 
 static void xen_smp_send_call_function_single_ipi(int cpu)
 {
-	xen_send_IPI_mask(&cpumask_of_cpu(cpu),
+	xen_send_IPI_mask(cpumask_of(cpu),
 			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
 
diff -r 2380271affb1 arch/x86/xen/suspend.c
--- a/arch/x86/xen/suspend.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/xen/suspend.c	Tue Nov 18 20:53:45 2008 +1030
@@ -35,7 +35,8 @@
 			pfn_to_mfn(xen_start_info->console.domU.mfn);
 	} else {
 #ifdef CONFIG_SMP
-		xen_cpu_initialized_map = cpu_online_map;
+		BUG_ON(xen_cpu_initialized_map == NULL);
+		cpumask_copy(xen_cpu_initialized_map, cpu_online_mask);
 #endif
 		xen_vcpu_restore();
 	}
diff -r 2380271affb1 arch/x86/xen/xen-ops.h
--- a/arch/x86/xen/xen-ops.h	Tue Nov 18 12:10:58 2008 +1030
+++ b/arch/x86/xen/xen-ops.h	Tue Nov 18 20:53:45 2008 +1030
@@ -58,7 +58,7 @@
 __cpuinit void xen_init_lock_cpu(int cpu);
 void xen_uninit_lock_cpu(int cpu);
 
-extern cpumask_t xen_cpu_initialized_map;
+extern cpumask_var_t xen_cpu_initialized_map;
 #else
 static inline void xen_smp_init(void) {}
 #endif
diff -r 2380271affb1 kernel/smp.c
--- a/kernel/smp.c	Tue Nov 18 12:10:58 2008 +1030
+++ b/kernel/smp.c	Tue Nov 18 20:53:45 2008 +1030
@@ -341,7 +341,7 @@
 	smp_mb();
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi(*to_cpumask(data->cpumask_bits));
+	arch_send_call_function_ipi(to_cpumask(data->cpumask_bits));
 
 	/* optionally wait for the CPUs to complete */
 	if (wait)
