cpumask: make irq_set_affinity() take a const struct cpumask *

Not much point with gentle transition here: the struct irq_chip's
setaffinity method signature needs to change.

Fortunately, not widely used code.

Note: I save a temporary in set_ioapic_affinity_irq() by mangling
irq_desc[irq].affinity directly.  Ingo, does this break anything?

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/mips/kernel/cevt-bcm1480.c |    2 -
 arch/mips/kernel/cevt-sb1250.c  |    2 -
 arch/sparc64/kernel/of_device.c |    4 --
 arch/sparc64/kernel/pci_msi.c   |    4 --
 arch/x86/kernel/io_apic_32.c    |   59 ++++++++++++++++------------------------
 arch/x86/kernel/irq_32.c        |    2 -
 drivers/xen/events.c            |    2 -
 include/linux/interrupt.h       |    4 +-
 include/linux/irq.h             |   10 +++---
 kernel/irq/manage.c             |   15 ++++------
 kernel/irq/migration.c          |   17 ++++++-----
 kernel/irq/proc.c               |   29 ++++++++++++-------
 kernel/time/tick-common.c       |    6 ++--
 13 files changed, 76 insertions(+), 80 deletions(-)

diff -r b871ae111f35 arch/sparc64/kernel/of_device.c
--- a/arch/sparc64/kernel/of_device.c	Mon Sep 29 14:33:55 2008 +1000
+++ b/arch/sparc64/kernel/of_device.c	Mon Sep 29 21:56:17 2008 +1000
@@ -748,9 +748,9 @@ out:
 out:
 	nid = of_node_to_nid(dp);
 	if (nid != -1) {
-		cpumask_t numa_mask = node_to_cpumask(nid);
-
-		irq_set_affinity(irq, numa_mask);
+		node_to_cpumask_ptr(numa_mask, nid);
+
+		irq_set_affinity(irq, numa_mask);
 	}
 
 	return irq;
diff -r b871ae111f35 arch/sparc64/kernel/pci_msi.c
--- a/arch/sparc64/kernel/pci_msi.c	Mon Sep 29 14:33:55 2008 +1000
+++ b/arch/sparc64/kernel/pci_msi.c	Mon Sep 29 21:56:17 2008 +1000
@@ -286,9 +286,9 @@ static int bringup_one_msi_queue(struct 
 
 	nid = pbm->numa_node;
 	if (nid != -1) {
-		cpumask_t numa_mask = node_to_cpumask(nid);
-
-		irq_set_affinity(irq, numa_mask);
+		node_to_cpumask_ptr(numa_mask, nid);
+
+		irq_set_affinity(irq, numa_mask);
 	}
 	err = request_irq(irq, sparc64_msiq_interrupt, 0,
 			  "MSIQ",
diff -r b871ae111f35 arch/x86/kernel/io_apic_32.c
--- a/arch/x86/kernel/io_apic_32.c	Mon Sep 29 14:33:55 2008 +1000
+++ b/arch/x86/kernel/io_apic_32.c	Mon Sep 29 21:56:17 2008 +1000
@@ -334,22 +334,22 @@ static void clear_IO_APIC(void)
 }
 
 #ifdef CONFIG_SMP
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
+static void set_ioapic_affinity_irq(unsigned int irq,
+				    const struct cpumask *cpumask)
 {
 	unsigned long flags;
 	int pin;
 	struct irq_pin_list *entry = irq_2_pin + irq;
 	unsigned int apicid_value;
-	cpumask_t tmp;
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	if (cpus_empty(tmp))
-		tmp = TARGET_CPUS;
+	spin_lock_irqsave(&ioapic_lock, flags);
+	cpumask_and(&irq_desc[irq].affinity, cpumask, cpu_online_mask);
+	if (cpumask_empty(&irq_desc[irq].affinity))
+		irq_desc[irq].affinity = TARGET_CPUS;
+	apicid_value = cpu_mask_to_apicid(irq_desc[irq].affinity);
 
-	apicid_value = cpu_mask_to_apicid(tmp);
 	/* Prepare to do the io_apic_write */
 	apicid_value = apicid_value << 24;
-	spin_lock_irqsave(&ioapic_lock, flags);
 	for (;;) {
 		pin = entry->pin;
 		if (pin == -1)
@@ -359,7 +359,6 @@ static void set_ioapic_affinity_irq(unsi
 			break;
 		entry = irq_2_pin + entry->next;
 	}
-	irq_desc[irq].affinity = tmp;
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
@@ -392,7 +391,7 @@ static struct irq_cpu_info {
 #define IDLE_ENOUGH(cpu,now) \
 	(idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
 
-#define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)
+#define IRQ_ALLOWED(cpu, allowed_mask)	cpumask_test_cpu(cpu, allowed_mask)
 
 #define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i)))
 
@@ -400,12 +399,12 @@ static cpumask_t balance_irq_affinity[NR
 	[0 ... NR_IRQS-1] = CPU_MASK_ALL
 };
 
-void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_balance_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
-	balance_irq_affinity[irq] = mask;
+	cpumask_copy(&balance_irq_affinity[irq], mask);
 }
 
-static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
+static unsigned long move(int curr_cpu, const struct cpumask *allowed_mask,
 			unsigned long now, int direction)
 {
 	int search_idle = 1;
@@ -435,16 +434,14 @@ static inline void balance_irq(int cpu, 
 static inline void balance_irq(int cpu, int irq)
 {
 	unsigned long now = jiffies;
-	cpumask_t allowed_mask;
 	unsigned int new_cpu;
 
 	if (irqbalance_disabled)
 		return;
 
-	cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
-	new_cpu = move(cpu, allowed_mask, now, 1);
+	new_cpu = move(cpu, &balance_irq_affinity[irq], now, 1);
 	if (cpu != new_cpu)
-		set_pending_irq(irq, cpumask_of_cpu(new_cpu));
+		set_pending_irq(irq, cpumask_of(new_cpu));
 }
 
 static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
@@ -479,7 +476,6 @@ static void do_irq_balance(void)
 	int tmp_loaded, first_attempt = 1;
 	unsigned long tmp_cpu_irq;
 	unsigned long imbalance = 0;
-	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
 	for_each_possible_cpu(i) {
 		int package_index;
@@ -623,15 +619,10 @@ tryanotherirq:
 		}
 	}
 
-	cpus_and(allowed_mask,
-		cpu_online_map,
-		balance_irq_affinity[selected_irq]);
-	target_cpu_mask = cpumask_of_cpu(min_loaded);
-	cpus_and(tmp, target_cpu_mask, allowed_mask);
-
-	if (!cpus_empty(tmp)) {
+	if (cpu_online(min_loaded) &&
+	    cpumask_test_cpu(min_loaded, &balance_irq_affinity[selected_irq])) {
 		/* mark for change destination */
-		set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
+		set_pending_irq(selected_irq, cpumask_of(min_loaded));
 
 		/* Since we made a change, come back sooner to
 		 * check for more variation.
@@ -661,7 +652,7 @@ static int balanced_irq(void *unused)
 	/* push everything to CPU 0 to give us a starting point.  */
 	for (i = 0 ; i < NR_IRQS ; i++) {
 		irq_desc[i].pending_mask = cpumask_of_cpu(0);
-		set_pending_irq(i, cpumask_of_cpu(0));
+		set_pending_irq(i, cpumask_of(0));
 	}
 
 	set_freezable();
@@ -920,11 +911,13 @@ void __init setup_ioapic_dest(void)
 
 	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
 		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
+			cpumask_t tmp;
 			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
 			if (irq_entry == -1)
 				continue;
 			irq = pin_2_irq(irq_entry, ioapic, pin);
-			set_ioapic_affinity_irq(irq, TARGET_CPUS);
+			tmp = TARGET_CPUS;
+			set_ioapic_affinity_irq(irq, &tmp);
 		}
 
 	}
@@ -2602,19 +2595,17 @@ static void target_ht_irq(unsigned int i
 	write_ht_irq_msg(irq, &msg);
 }
 
-static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	unsigned int dest;
-	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
-		tmp = TARGET_CPUS;
+	cpumask_and(&irq_desc[irq].affinity, mask, cpu_online_mask);
+	if (cpumask_empty(&irq_desc[irq].affinity))
+		irq_desc[irq].affinity = TARGET_CPUS;
 
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid(irq_desc[irq].affinity);
 
 	target_ht_irq(irq, dest);
-	irq_desc[irq].affinity = tmp;
 }
 #endif
 
diff -r b871ae111f35 arch/x86/kernel/irq_32.c
--- a/arch/x86/kernel/irq_32.c	Mon Sep 29 14:33:55 2008 +1000
+++ b/arch/x86/kernel/irq_32.c	Mon Sep 29 21:56:17 2008 +1000
@@ -407,7 +407,7 @@ void fixup_irqs(cpumask_t map)
 			mask = map;
 		}
 		if (irq_desc[irq].chip->set_affinity)
-			irq_desc[irq].chip->set_affinity(irq, mask);
+			irq_desc[irq].chip->set_affinity(irq, &mask);
 		else if (irq_desc[irq].action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
diff -r b871ae111f35 drivers/xen/events.c
--- a/drivers/xen/events.c	Mon Sep 29 14:33:55 2008 +1000
+++ b/drivers/xen/events.c	Mon Sep 29 21:56:17 2008 +1000
@@ -578,7 +578,7 @@ void rebind_evtchn_irq(int evtchn, int i
 	spin_unlock(&irq_mapping_update_lock);
 
 	/* new event channels are always bound to cpu 0 */
-	irq_set_affinity(irq, cpumask_of_cpu(0));
+	irq_set_affinity(irq, cpumask_of(0));
 
 	/* Unmask the event channel. */
 	enable_irq(irq);
diff -r b871ae111f35 include/linux/interrupt.h
--- a/include/linux/interrupt.h	Mon Sep 29 14:33:55 2008 +1000
+++ b/include/linux/interrupt.h	Mon Sep 29 21:56:17 2008 +1000
@@ -106,13 +106,13 @@ extern void enable_irq(unsigned int irq)
 
 extern cpumask_t irq_default_affinity;
 
-extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
 #else /* CONFIG_SMP */
 
-static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
 {
 	return -EINVAL;
 }
diff -r b871ae111f35 include/linux/irq.h
--- a/include/linux/irq.h	Mon Sep 29 14:33:55 2008 +1000
+++ b/include/linux/irq.h	Mon Sep 29 21:56:17 2008 +1000
@@ -110,7 +110,7 @@ struct irq_chip {
 	void		(*eoi)(unsigned int irq);
 
 	void		(*end)(unsigned int irq);
-	void		(*set_affinity)(unsigned int irq, cpumask_t dest);
+	void		(*set_affinity)(unsigned int irq, const struct cpumask *dest);
 	int		(*retrigger)(unsigned int irq);
 	int		(*set_type)(unsigned int irq, unsigned int flow_type);
 	int		(*set_wake)(unsigned int irq, unsigned int on);
@@ -205,7 +205,7 @@ extern int setup_irq(unsigned int irq, s
 
 #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
 
-void set_pending_irq(unsigned int irq, cpumask_t mask);
+void set_pending_irq(unsigned int irq, const struct cpumask *mask);
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
 
@@ -223,7 +223,7 @@ static inline void move_masked_irq(int i
 {
 }
 
-static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
+static inline void set_pending_irq(unsigned int irq, const struct cpumask *mask)
 {
 }
 
@@ -237,9 +237,9 @@ static inline void set_pending_irq(unsig
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_IRQBALANCE
-extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask);
+extern void set_balance_irq_affinity(unsigned int irq, const struct cpumask *mask);
 #else
-static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
+static inline void set_balance_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 }
 #endif
diff -r b871ae111f35 kernel/irq/manage.c
--- a/kernel/irq/manage.c	Mon Sep 29 14:33:55 2008 +1000
+++ b/kernel/irq/manage.c	Mon Sep 29 21:56:17 2008 +1000
@@ -79,7 +79,7 @@ int irq_can_set_affinity(unsigned int ir
  *	@cpumask:	cpumask
  *
  */
-int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
 	struct irq_desc *desc = irq_desc + irq;
 
@@ -91,7 +91,7 @@ int irq_set_affinity(unsigned int irq, c
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	set_pending_irq(irq, cpumask);
 #else
-	desc->affinity = cpumask;
+	cpumask_copy(&desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
 	return 0;
@@ -103,17 +103,14 @@ int irq_set_affinity(unsigned int irq, c
  */
 int irq_select_affinity(unsigned int irq)
 {
-	cpumask_t mask;
-
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
-	cpus_and(mask, cpu_online_map, irq_default_affinity);
+	cpumask_and(&irq_desc[irq].affinity,
+		    cpu_online_mask, &irq_default_affinity);
+	irq_desc[irq].chip->set_affinity(irq, &irq_desc[irq].affinity);
 
-	irq_desc[irq].affinity = mask;
-	irq_desc[irq].chip->set_affinity(irq, mask);
-
-	set_balance_irq_affinity(irq, mask);
+	set_balance_irq_affinity(irq, &irq_desc[irq].affinity);
 	return 0;
 }
 #endif
diff -r b871ae111f35 kernel/irq/migration.c
--- a/kernel/irq/migration.c	Mon Sep 29 14:33:55 2008 +1000
+++ b/kernel/irq/migration.c	Mon Sep 29 21:56:17 2008 +1000
@@ -1,21 +1,21 @@
 
 #include <linux/irq.h>
 
-void set_pending_irq(unsigned int irq, cpumask_t mask)
+void set_pending_irq(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_desc + irq;
 	unsigned long flags;
 
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->status |= IRQ_MOVE_PENDING;
-	irq_desc[irq].pending_mask = mask;
+	cpumask_copy(&irq_desc[irq].pending_mask, mask);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 void move_masked_irq(int irq)
 {
 	struct irq_desc *desc = irq_desc + irq;
-	cpumask_t tmp;
+	cpumask_var_t tmp;
 
 	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
 		return;
@@ -38,7 +38,10 @@ void move_masked_irq(int irq)
 
 	assert_spin_locked(&desc->lock);
 
-	cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
+	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
+		return;
+
+	cpumask_and(tmp, &irq_desc[irq].pending_mask, cpu_online_mask);
 
 	/*
 	 * If there was a valid mask to work with, please
@@ -52,10 +55,10 @@ void move_masked_irq(int irq)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(!cpus_empty(tmp))) {
-		desc->chip->set_affinity(irq,tmp);
+	if (likely(!cpumask_empty(tmp))) {
+		desc->chip->set_affinity(irq, tmp);
 	}
-	cpus_clear(irq_desc[irq].pending_mask);
+	cpumask_clear(&irq_desc[irq].pending_mask);
 }
 
 void move_native_irq(int irq)
diff -r b871ae111f35 kernel/irq/proc.c
--- a/kernel/irq/proc.c	Mon Sep 29 14:33:55 2008 +1000
+++ b/kernel/irq/proc.c	Mon Sep 29 21:56:17 2008 +1000
@@ -40,33 +40,42 @@ static ssize_t irq_affinity_proc_write(s
 		const char __user *buffer, size_t count, loff_t *pos)
 {
 	unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
-	cpumask_t new_value;
+	cpumask_var_t new_value;
 	int err;
 
 	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
 	    irq_balancing_disabled(irq))
 		return -EIO;
 
-	err = cpumask_parse_user(buffer, count, &new_value);
+	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
-		return err;
+		goto free_cpumask;
 
-	if (!is_affinity_mask_valid(new_value))
-		return -EINVAL;
+	if (!is_affinity_mask_valid(new_value)) {
+		err = -EINVAL;
+		goto free_cpumask;
+	}
 
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!cpus_intersects(new_value, cpu_online_map))
+	if (!cpumask_intersects(new_value, cpu_online_mask)) {
 		/* Special case for empty set - allow the architecture
 		   code to set default SMP affinity. */
-		return irq_select_affinity(irq) ? -EINVAL : count;
+		err = irq_select_affinity(irq) ? -EINVAL : count;
+	} else {
+		irq_set_affinity(irq, new_value);
+		err = count;
+	}
 
-	irq_set_affinity(irq, new_value);
-
-	return count;
+free_cpumask:
+	free_cpumask_var(new_value);
+	return err;
 }
 
 static int irq_affinity_proc_open(struct inode *inode, struct file *file)
diff -r b871ae111f35 kernel/time/tick-common.c
--- a/kernel/time/tick-common.c	Mon Sep 29 14:33:55 2008 +1000
+++ b/kernel/time/tick-common.c	Mon Sep 29 21:56:17 2008 +1000
@@ -135,7 +135,7 @@ void tick_setup_periodic(struct clock_ev
  */
 static void tick_setup_device(struct tick_device *td,
 			      struct clock_event_device *newdev, int cpu,
-			      const cpumask_t *cpumask)
+			      const struct cpumask *cpumask)
 {
 	ktime_t next_event;
 	void (*handler)(struct clock_event_device *) = NULL;
@@ -169,8 +169,8 @@ static void tick_setup_device(struct tic
 	 * When the device is not per cpu, pin the interrupt to the
 	 * current cpu:
 	 */
-	if (!cpus_equal(newdev->cpumask, *cpumask))
-		irq_set_affinity(newdev->irq, *cpumask);
+	if (!cpumask_equal(&newdev->cpumask, cpumask))
+		irq_set_affinity(newdev->irq, cpumask);
 
 	/*
 	 * When global broadcasting is active, check if the current
diff -r e2f34a66405f arch/mips/kernel/cevt-bcm1480.c
--- a/arch/mips/kernel/cevt-bcm1480.c	Mon Sep 29 21:56:17 2008 +1000
+++ b/arch/mips/kernel/cevt-bcm1480.c	Mon Sep 29 22:11:31 2008 +1000
@@ -148,6 +148,6 @@ void __cpuinit sb1480_clockevent_init(vo
 	action->name	= name;
 	action->dev_id	= cd;
 
-	irq_set_affinity(irq, cpumask_of_cpu(cpu));
+	irq_set_affinity(irq, cpumask_of(cpu));
 	setup_irq(irq, action);
 }
diff -r e2f34a66405f arch/mips/kernel/cevt-sb1250.c
--- a/arch/mips/kernel/cevt-sb1250.c	Mon Sep 29 21:56:17 2008 +1000
+++ b/arch/mips/kernel/cevt-sb1250.c	Mon Sep 29 22:11:31 2008 +1000
@@ -147,6 +147,6 @@ void __cpuinit sb1250_clockevent_init(vo
 	action->name	= name;
 	action->dev_id	= cd;
 
-	irq_set_affinity(irq, cpumask_of_cpu(cpu));
+	irq_set_affinity(irq, cpumask_of(cpu));
 	setup_irq(irq, action);
 }
