x86: convert more irq functions to take struct cpumask *

This is a broader conversion than the minimal one done in
cpumask:irq-functions-take-cpumask_t-ptr.patch.

Note: we still use fixed NR_CPUS-sized cpumask bitmaps here; this is
fixed in the next patch.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/x86/include/asm/genapic_32.h |    2 
 arch/x86/include/asm/genapic_64.h |    2 
 arch/x86/include/asm/irq.h        |    2 
 arch/x86/kernel/hpet.c            |    4 
 arch/x86/kernel/io_apic.c         |  177 +++++++++++++++++++++-----------------
 arch/x86/kernel/irq_32.c          |    6 -
 arch/x86/kernel/irq_64.c          |   10 +-
 arch/x86/kernel/smpboot.c         |    2 
 drivers/xen/events.c              |    4 
 9 files changed, 115 insertions(+), 94 deletions(-)

diff -r bd66efe502c8 arch/x86/include/asm/genapic_32.h
--- a/arch/x86/include/asm/genapic_32.h	Tue Nov 18 11:52:00 2008 +1030
+++ b/arch/x86/include/asm/genapic_32.h	Tue Nov 18 12:00:30 2008 +1030
@@ -59,7 +59,7 @@
 	unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
 	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
 					       const struct cpumask *andmask);
-	void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 
 #ifdef CONFIG_SMP
 	/* ipi */
diff -r bd66efe502c8 arch/x86/include/asm/genapic_64.h
--- a/arch/x86/include/asm/genapic_64.h	Tue Nov 18 11:52:00 2008 +1030
+++ b/arch/x86/include/asm/genapic_64.h	Tue Nov 18 12:00:30 2008 +1030
@@ -21,7 +21,7 @@
 	u32 int_dest_mode;
 	int (*apic_id_registered)(void);
 	const cpumask_t *(*target_cpus)(void);
-	void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 	/* ipi */
 	void (*send_IPI_mask)(const cpumask_t *mask, int vector);
diff -r bd66efe502c8 arch/x86/include/asm/irq.h
--- a/arch/x86/include/asm/irq.h	Tue Nov 18 11:52:00 2008 +1030
+++ b/arch/x86/include/asm/irq.h	Tue Nov 18 12:00:30 2008 +1030
@@ -37,7 +37,7 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(const cpumask_t *map);
 #endif
 
 extern unsigned int do_IRQ(struct pt_regs *regs);
diff -r bd66efe502c8 arch/x86/kernel/hpet.c
--- a/arch/x86/kernel/hpet.c	Tue Nov 18 11:52:00 2008 +1030
+++ b/arch/x86/kernel/hpet.c	Tue Nov 18 12:00:30 2008 +1030
@@ -246,7 +246,7 @@
 	 * Start hpet with the boot cpu mask and make it
 	 * global after the IO_APIC has been initialized.
 	 */
-	hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
+	cpumask_copy(&hpet_clockevent.cpumask, cpumask_of(smp_processor_id()));
 	clockevents_register_device(&hpet_clockevent);
 	global_clock_event = &hpet_clockevent;
 	printk(KERN_DEBUG "hpet clockevent registered\n");
@@ -500,7 +500,7 @@
 	/* 5 usec minimum reprogramming delta. */
 	evt->min_delta_ns = 5000;
 
-	evt->cpumask = cpumask_of_cpu(hdev->cpu);
+	cpumask_copy(&evt->cpumask, cpumask_of(hdev->cpu));
 	clockevents_register_device(evt);
 }
 
diff -r bd66efe502c8 arch/x86/kernel/io_apic.c
--- a/arch/x86/kernel/io_apic.c	Tue Nov 18 11:52:00 2008 +1030
+++ b/arch/x86/kernel/io_apic.c	Tue Nov 18 12:00:30 2008 +1030
@@ -111,8 +111,8 @@
 struct irq_cfg {
 	unsigned int irq;
 	struct irq_pin_list *irq_2_pin;
-	cpumask_t domain;
-	cpumask_t old_domain;
+	DECLARE_BITMAP(domain, NR_CPUS);
+	DECLARE_BITMAP(old_domain, NR_CPUS);
 	unsigned move_cleanup_count;
 	u8 vector;
 	u8 move_in_progress : 1;
@@ -120,22 +120,22 @@
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
 static struct irq_cfg irq_cfgx[NR_IRQS] = {
-	[0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
-	[1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
-	[2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
-	[3]  = { .irq =  3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
-	[4]  = { .irq =  4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
-	[5]  = { .irq =  5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
-	[6]  = { .irq =  6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
-	[7]  = { .irq =  7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
-	[8]  = { .irq =  8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
-	[9]  = { .irq =  9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
-	[10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
-	[11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
-	[12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
-	[13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
-	[14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
-	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
+	[0]  = { .irq =  0, .domain = CPU_BITS_ALL, .vector = IRQ0_VECTOR,  },
+	[1]  = { .irq =  1, .domain = CPU_BITS_ALL, .vector = IRQ1_VECTOR,  },
+	[2]  = { .irq =  2, .domain = CPU_BITS_ALL, .vector = IRQ2_VECTOR,  },
+	[3]  = { .irq =  3, .domain = CPU_BITS_ALL, .vector = IRQ3_VECTOR,  },
+	[4]  = { .irq =  4, .domain = CPU_BITS_ALL, .vector = IRQ4_VECTOR,  },
+	[5]  = { .irq =  5, .domain = CPU_BITS_ALL, .vector = IRQ5_VECTOR,  },
+	[6]  = { .irq =  6, .domain = CPU_BITS_ALL, .vector = IRQ6_VECTOR,  },
+	[7]  = { .irq =  7, .domain = CPU_BITS_ALL, .vector = IRQ7_VECTOR,  },
+	[8]  = { .irq =  8, .domain = CPU_BITS_ALL, .vector = IRQ8_VECTOR,  },
+	[9]  = { .irq =  9, .domain = CPU_BITS_ALL, .vector = IRQ9_VECTOR,  },
+	[10] = { .irq = 10, .domain = CPU_BITS_ALL, .vector = IRQ10_VECTOR, },
+	[11] = { .irq = 11, .domain = CPU_BITS_ALL, .vector = IRQ11_VECTOR, },
+	[12] = { .irq = 12, .domain = CPU_BITS_ALL, .vector = IRQ12_VECTOR, },
+	[13] = { .irq = 13, .domain = CPU_BITS_ALL, .vector = IRQ13_VECTOR, },
+	[14] = { .irq = 14, .domain = CPU_BITS_ALL, .vector = IRQ14_VECTOR, },
+	[15] = { .irq = 15, .domain = CPU_BITS_ALL, .vector = IRQ15_VECTOR, },
 };
 
 #define for_each_irq_cfg(irq, cfg)		\
@@ -359,7 +359,7 @@
 	}
 }
 
-static int assign_irq_vector(int irq, const cpumask_t *mask);
+static int assign_irq_vector(int irq, const struct cpumask *mask);
 
 static void set_ioapic_affinity_irq(unsigned int irq,
 				    const struct cpumask *mask)
@@ -1034,7 +1034,7 @@
 	spin_unlock(&vector_lock);
 }
 
-static int __assign_irq_vector(int irq, const cpumask_t *mask)
+static int __assign_irq_vector(int irq, const struct cpumask *mask)
 {
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -1051,26 +1051,32 @@
 	unsigned int old_vector;
 	int cpu;
 	struct irq_cfg *cfg;
-	cpumask_t tmp_mask;
+	cpumask_var_t tmp_mask;
 
 	cfg = irq_cfg(irq);
 	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
 		return -EBUSY;
 
+	/* May run under vector_lock with irqs disabled: must not sleep. */
+	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+		return -ENOMEM;
+
 	old_vector = cfg->vector;
 	if (old_vector) {
-		cpus_and(tmp_mask, *mask, cpu_online_map);
-		cpus_and(tmp_mask, cfg->domain, tmp_mask);
-		if (!cpus_empty(tmp_mask))
-			return 0;
+		cpumask_and(tmp_mask, mask, cpu_online_mask);
+		cpumask_and(tmp_mask, tmp_mask, to_cpumask(cfg->domain));
+		if (!cpumask_empty(tmp_mask)) {
+			free_cpumask_var(tmp_mask);
+			return 0;
+		}
 	}
 
 	/* Only try and allocate irqs on cpus that are present */
-	for_each_cpu_and(cpu, mask, &cpu_online_map) {
+	for_each_cpu_and(cpu, mask, cpu_online_mask) {
 		int new_cpu;
 		int vector, offset;
 
-		vector_allocation_domain(cpu, &tmp_mask);
+		vector_allocation_domain(cpu, tmp_mask);
 
 		vector = current_vector;
 		offset = current_offset;
@@ -1090,7 +1093,7 @@
 		if (vector == SYSCALL_VECTOR)
 			goto next;
 #endif
-		for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map)
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
@@ -1098,12 +1101,15 @@
 		current_offset = offset;
 		if (old_vector) {
 			cfg->move_in_progress = 1;
-			cfg->old_domain = cfg->domain;
+			cpumask_copy(to_cpumask(cfg->old_domain),
+				     to_cpumask(cfg->domain));
 		}
-		for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map)
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
-		cfg->domain = tmp_mask;
-		return 0;
+		cpumask_copy(to_cpumask(cfg->domain), tmp_mask);
+		free_cpumask_var(tmp_mask);
+		return 0;
 	}
+	free_cpumask_var(tmp_mask);
 	return -ENOSPC;
@@ -1123,19 +1127,17 @@
 static void __clear_irq_vector(int irq)
 {
 	struct irq_cfg *cfg;
-	cpumask_t mask;
 	int cpu, vector;
 
 	cfg = irq_cfg(irq);
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
-	cpus_and(mask, cfg->domain, cpu_online_map);
-	for_each_cpu_mask_nr(cpu, mask)
+	for_each_cpu_and(cpu, cpu_online_mask, to_cpumask(cfg->domain))
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
-	cpus_clear(cfg->domain);
+	cpumask_clear(to_cpumask(cfg->domain));
 }
 
 void __setup_vector_irq(int cpu)
@@ -1147,7 +1149,7 @@
 
 	/* Mark the inuse vectors */
 	for_each_irq_cfg(irq, cfg) {
-		if (!cpu_isset(cpu, cfg->domain))
+		if (!cpumask_test_cpu(cpu, to_cpumask(cfg->domain)))
 			continue;
 		vector = cfg->vector;
 		per_cpu(vector_irq, cpu)[vector] = irq;
@@ -1159,7 +1161,7 @@
 			continue;
 
 		cfg = irq_cfg(irq);
-		if (!cpu_isset(cpu, cfg->domain))
+		if (!cpumask_test_cpu(cpu, to_cpumask(cfg->domain)))
 			per_cpu(vector_irq, cpu)[vector] = -1;
 	}
 }
@@ -1298,18 +1300,17 @@
 {
 	struct irq_cfg *cfg;
 	struct IO_APIC_route_entry entry;
-	cpumask_t mask;
+	DECLARE_BITMAP(mask, NR_CPUS);
 
 	if (!IO_APIC_IRQ(irq))
 		return;
 
 	cfg = irq_cfg(irq);
 
-	mask = *TARGET_CPUS;
-	if (assign_irq_vector(irq, &mask))
+	if (assign_irq_vector(irq, TARGET_CPUS))
 		return;
 
-	cpus_and(mask, cfg->domain, mask);
+	cpumask_and(to_cpumask(mask), to_cpumask(cfg->domain), TARGET_CPUS);
 
 	apic_printk(APIC_VERBOSE,KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1319,8 +1320,9 @@
 
 
 	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
-			       cpu_mask_to_apicid(&mask), trigger, polarity,
-			       cfg->vector)) {
+				cpu_mask_to_apicid(
+					(const cpumask_t *)to_cpumask(mask)),
+				trigger, polarity, cfg->vector)) {
 		printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
 		       mp_ioapics[apic].mp_apicid, pin);
 		__clear_irq_vector(irq);
@@ -2025,7 +2027,8 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	send_IPI_mask(&cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
+	send_IPI_mask(&cpumask_of_cpu(cpumask_first(to_cpumask(cfg->domain))),
+			cfg->vector);
 	spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
@@ -2074,18 +2077,18 @@
  * as simple as edge triggered migration and we can do the irq migration
  * with a simple atomic update to IO-APIC RTE.
  */
-static void migrate_ioapic_irq(int irq, const cpumask_t *mask)
+static void migrate_ioapic_irq(int irq, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
 	struct irq_desc *desc;
-	cpumask_t tmpmask;
+	DECLARE_BITMAP(tmp_mask, NR_CPUS);
 	struct irte irte;
 	int modify_ioapic_rte;
 	unsigned int dest;
 	unsigned long flags;
 
-	cpus_and(tmpmask, *mask, cpu_online_map);
-	if (cpus_empty(tmpmask))
+	cpumask_and(to_cpumask(tmp_mask), mask, cpu_online_mask);
+	if (cpumask_empty(to_cpumask(tmp_mask)))
 		return;
 
 	if (get_irte(irq, &irte))
@@ -2095,8 +2098,8 @@
 		return;
 
 	cfg = irq_cfg(irq);
-	cpus_and(tmpmask, cfg->domain, *mask);
-	dest = cpu_mask_to_apicid(&tmpmask);
+	cpumask_and(to_cpumask(tmp_mask), to_cpumask(cfg->domain), mask);
+	dest = cpu_mask_to_apicid(to_cpumask(tmp_mask));
 
 	desc = irq_to_desc(irq);
 	modify_ioapic_rte = desc->status & IRQ_LEVEL;
@@ -2115,13 +2118,14 @@
 	modify_irte(irq, &irte);
 
 	if (cfg->move_in_progress) {
-		cpus_and(tmpmask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(tmpmask);
-		send_IPI_mask(&tmpmask, IRQ_MOVE_CLEANUP_VECTOR);
+		cpumask_and(to_cpumask(tmp_mask),
+			    to_cpumask(cfg->old_domain), cpu_online_mask);
+		cfg->move_cleanup_count = cpumask_weight(to_cpumask(tmp_mask));
+		send_IPI_mask(to_cpumask(tmp_mask), IRQ_MOVE_CLEANUP_VECTOR);
 		cfg->move_in_progress = 0;
 	}
 
-	desc->affinity = *mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 
 static int migrate_irq_remapped_level(int irq)
@@ -2147,7 +2151,7 @@
 
 	ret = 0;
 	desc->status &= ~IRQ_MOVE_PENDING;
-	cpus_clear(desc->pending_mask);
+	cpumask_clear(&desc->pending_mask);
 
 unmask:
 	unmask_IO_APIC_irq(irq);
@@ -2221,7 +2225,8 @@
 		if (!cfg->move_cleanup_count)
 			goto unlock;
 
-		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
+		if ((vector == cfg->vector) &&
+		    cpumask_test_cpu(me, to_cpumask(cfg->domain)))
 			goto unlock;
 
 		__get_cpu_var(vector_irq)[vector] = -1;
@@ -2243,12 +2248,16 @@
 
 	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
-	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
-		cpumask_t cleanup_mask;
+	if ((vector == cfg->vector) &&
+	    cpumask_test_cpu(me, to_cpumask(cfg->domain))) {
+		DECLARE_BITMAP(cleanup_mask, NR_CPUS);
 
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+		cpumask_and(to_cpumask(cleanup_mask),
+			    to_cpumask(cfg->old_domain), cpu_online_mask);
+		cfg->move_cleanup_count =
+				cpumask_weight(to_cpumask(cleanup_mask));
+		send_IPI_mask((const cpumask_t *)to_cpumask(cleanup_mask),
+				IRQ_MOVE_CLEANUP_VECTOR);
 		cfg->move_in_progress = 0;
 	}
 }
@@ -2947,16 +2956,13 @@
 	struct irq_cfg *cfg;
 	int err;
 	unsigned dest;
-	cpumask_t tmp;
 
-	tmp = *TARGET_CPUS;
-	err = assign_irq_vector(irq, &tmp);
+	err = assign_irq_vector(irq, TARGET_CPUS);
 	if (err)
 		return err;
 
 	cfg = irq_cfg(irq);
-	cpus_and(tmp, cfg->domain, tmp);
-	dest = cpu_mask_to_apicid(&tmp);
+	dest = cpu_mask_to_apicid_and(to_cpumask(cfg->domain), TARGET_CPUS);
 
 #ifdef CONFIG_INTR_REMAP
 	if (irq_remapped(irq)) {
@@ -3050,7 +3056,7 @@
 {
 	struct irq_cfg *cfg;
 	unsigned int dest;
-	cpumask_t tmp, cleanup_mask;
+	cpumask_t tmp;
 	struct irte irte;
 	struct irq_desc *desc;
 
@@ -3064,7 +3070,7 @@
 		return;
 
 	cfg = irq_cfg(irq);
-	cpumask_and(&tmp, &cfg->domain, mask);
+	cpumask_and(&tmp, to_cpumask(cfg->domain), mask);
 	dest = cpu_mask_to_apicid(&tmp);
 
 	irte.vector = cfg->vector;
@@ -3081,9 +3087,24 @@
 	 * vector allocation.
 	 */
 	if (cfg->move_in_progress) {
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+		const struct cpumask *oldm = to_cpumask(cfg->old_domain);
+		cpumask_var_t cleanup_mask;
+
+		if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+			/* Slow Path */
+			cfg->move_cleanup_count = 0;
+			for_each_cpu_and(dest, cpu_online_mask, oldm)
+				cfg->move_cleanup_count++;
+
+			for_each_cpu_and(dest, cpu_online_mask, oldm)
+				send_IPI_mask(cpumask_of(dest),
+					      IRQ_MOVE_CLEANUP_VECTOR);
+		} else {
+			cpumask_and(cleanup_mask, oldm, cpu_online_mask);
+			cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
+			send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+			free_cpumask_var(cleanup_mask);
+		}
 		cfg->move_in_progress = 0;
 	}
 
@@ -3365,7 +3386,7 @@
 		return;
 
 	cfg = irq_cfg(irq);
-	cpumask_and(&tmp, &cfg->domain, mask);
+	cpumask_and(&tmp, to_cpumask(cfg->domain), mask);
 	dest = cpu_mask_to_apicid(&tmp);
 
 	hpet_msi_read(irq, &msg);
@@ -3445,7 +3466,7 @@
 		return;
 
 	cfg = irq_cfg(irq);
-	cpumask_and(&tmp, &cfg->domain, mask);
+	cpumask_and(&tmp, to_cpumask(cfg->domain), mask);
 	dest = cpu_mask_to_apicid(&tmp);
 
 	target_ht_irq(irq, dest, cfg->vector);
@@ -3469,17 +3490,17 @@
 {
 	struct irq_cfg *cfg;
 	int err;
-	cpumask_t tmp;
+	DECLARE_BITMAP(tmp, NR_CPUS);
 
-	tmp = *TARGET_CPUS;
-	err = assign_irq_vector(irq, &tmp);
+	err = assign_irq_vector(irq, TARGET_CPUS);
 	if (!err) {
 		struct ht_irq_msg msg;
 		unsigned dest;
 
 		cfg = irq_cfg(irq);
-		cpus_and(tmp, cfg->domain, tmp);
-		dest = cpu_mask_to_apicid(&tmp);
+		cpumask_and(to_cpumask(tmp), to_cpumask(cfg->domain),
+					     TARGET_CPUS);
+		dest = cpu_mask_to_apicid((const cpumask_t *)to_cpumask(tmp));
 
 		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
@@ -3515,7 +3536,7 @@
 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 		       unsigned long mmr_offset)
 {
-	const cpumask_t *eligible_cpu = &cpumask_of_cpu(cpu);
+	const struct cpumask *eligible_cpu = cpumask_of(cpu);
 	struct irq_cfg *cfg;
 	int mmr_pnode;
 	unsigned long mmr_value;
diff -r bd66efe502c8 arch/x86/kernel/irq_32.c
--- a/arch/x86/kernel/irq_32.c	Tue Nov 18 11:52:00 2008 +1030
+++ b/arch/x86/kernel/irq_32.c	Tue Nov 18 12:00:30 2008 +1030
@@ -233,7 +233,7 @@
 #ifdef CONFIG_HOTPLUG_CPU
 #include <mach_apic.h>
 
-void fixup_irqs(cpumask_t map)
+void fixup_irqs(const struct cpumask *map)
 {
 	unsigned int irq;
 	static int warned;
@@ -245,10 +245,10 @@
 		if (irq == 2)
 			continue;
 
-		cpus_and(mask, desc->affinity, map);
+		cpumask_and(&mask, &desc->affinity, map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			printk("Breaking affinity for irq %i\n", irq);
-			mask = map;
+			mask = *map;
 		}
 		if (desc->chip->set_affinity)
 			desc->chip->set_affinity(irq, &mask);
diff -r bd66efe502c8 arch/x86/kernel/irq_64.c
--- a/arch/x86/kernel/irq_64.c	Tue Nov 18 11:52:00 2008 +1030
+++ b/arch/x86/kernel/irq_64.c	Tue Nov 18 12:00:30 2008 +1030
@@ -83,7 +83,7 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(cpumask_t map)
+void fixup_irqs(const cpumask_t *map)
 {
 	unsigned int irq;
 	static int warned;
@@ -101,22 +101,22 @@
 		spin_lock(&desc->lock);
 
 		if (!irq_has_action(irq) ||
-		    cpus_equal(desc->affinity, map)) {
+		    cpumask_equal(&desc->affinity, map)) {
 			spin_unlock(&desc->lock);
 			continue;
 		}
 
-		cpus_and(mask, desc->affinity, map);
+		cpumask_and(&mask, &desc->affinity, map);
 		if (cpus_empty(mask)) {
 			break_affinity = 1;
-			mask = map;
+			mask = *map;
 		}
 
 		if (desc->chip->mask)
 			desc->chip->mask(irq);
 
 		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, mask);
+			desc->chip->set_affinity(irq, &mask);
 		else if (!(warned++))
 			set_affinity = 0;
 
diff -r bd66efe502c8 arch/x86/kernel/smpboot.c
--- a/arch/x86/kernel/smpboot.c	Tue Nov 18 11:52:00 2008 +1030
+++ b/arch/x86/kernel/smpboot.c	Tue Nov 18 12:00:30 2008 +1030
@@ -1349,7 +1349,7 @@
 	lock_vector_lock();
 	remove_cpu_from_maps(cpu);
 	unlock_vector_lock();
-	fixup_irqs(cpu_online_map);
+	fixup_irqs(cpu_online_mask);
 }
 
 int native_cpu_disable(void)
diff -r bd66efe502c8 drivers/xen/events.c
--- a/drivers/xen/events.c	Tue Nov 18 11:52:00 2008 +1030
+++ b/drivers/xen/events.c	Tue Nov 18 12:00:30 2008 +1030
@@ -125,7 +125,7 @@
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(&irq_to_desc(irq)->affinity, cpumask_of(cpu));
 #endif
 
 	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
@@ -142,7 +142,7 @@
 
 	/* By default all event channels notify CPU#0. */
 	for_each_irq_desc(i, desc)
-		desc->affinity = cpumask_of_cpu(0);
+		cpumask_copy(&desc->affinity, cpumask_of(0));
 #endif
 
 	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
