cpumask: convert misc driver functions

Convert misc driver functions to use struct cpumask: pass cpumasks by
pointer, replace on-stack cpumask_t variables with cpumask_var_t (or a
caller-provided scratch mask), and switch to the cpumask_* operators.

To Do:
  - Convert iucv_buffer_cpumask to cpumask_var_t.
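
For reference, the idiom applied throughout is roughly the following
(an illustrative sketch only; example_count_online() is a made-up
function, not code from any of the files touched below):

	static int example_count_online(void)
	{
		cpumask_var_t mask;	/* was: cpumask_t mask; */
		int count;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		/* was: mask = cpu_online_map; count = cpus_weight(mask); */
		cpumask_copy(mask, cpu_online_mask);
		count = cpumask_weight(mask);

		free_cpumask_var(mask);
		return count;
	}

With CONFIG_CPUMASK_OFFSTACK=y the cpumask_var_t is allocated from the
heap rather than consuming NR_CPUS bits of stack; without it the mask
stays on the stack and alloc_cpumask_var()/free_cpumask_var() are
effectively no-ops.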

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
---
 drivers/base/cpu.c                           |    2 -
 drivers/infiniband/hw/ehca/ehca_irq.c        |    8 +++----
 drivers/infiniband/hw/ipath/ipath_file_ops.c |    8 +++----
 drivers/misc/sgi-xp/xpc_main.c               |    2 -
 drivers/net/sfc/efx.c                        |   27 +++++++++++++++-----------
 drivers/oprofile/buffer_sync.c               |   22 +++++++++++++++++----
 drivers/oprofile/buffer_sync.h               |    4 +++
 drivers/oprofile/oprof.c                     |    9 +++++++-
 drivers/xen/manage.c                         |    2 -
 net/iucv/iucv.c                              |   28 +++++++++++++++++----------
 10 files changed, 75 insertions(+), 37 deletions(-)

diff -r 4f815aecbe2d drivers/base/cpu.c
--- a/drivers/base/cpu.c	Tue Nov 18 23:22:46 2008 +1030
+++ b/drivers/base/cpu.c	Tue Nov 18 23:25:18 2008 +1030
@@ -107,7 +107,7 @@
 /*
  * Print cpu online, possible, present, and system maps
  */
-static ssize_t print_cpus_map(char *buf, cpumask_t *map)
+static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
 {
 	int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map);
 
diff -r 4f815aecbe2d drivers/infiniband/hw/ehca/ehca_irq.c
--- a/drivers/infiniband/hw/ehca/ehca_irq.c	Tue Nov 18 23:22:46 2008 +1030
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c	Tue Nov 18 23:25:18 2008 +1030
@@ -659,12 +659,12 @@
 
 	WARN_ON_ONCE(!in_interrupt());
 	if (ehca_debug_level >= 3)
-		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+		ehca_dmp(cpu_online_mask, cpumask_size(), "");
 
 	spin_lock_irqsave(&pool->last_cpu_lock, flags);
 	cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
 	if (cpu >= nr_cpu_ids)
-		cpu = first_cpu(cpu_online_map);
+		cpu = cpumask_first(cpu_online_mask);
 	pool->last_cpu = cpu;
 	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
 
@@ -855,7 +855,7 @@
 	case CPU_UP_CANCELED_FROZEN:
 		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-		kthread_bind(cct->task, any_online_cpu(cpu_online_map));
+		kthread_bind(cct->task, cpumask_any(cpu_online_mask));
 		destroy_comp_task(pool, cpu);
 		break;
 	case CPU_ONLINE:
@@ -902,7 +902,7 @@
 		return -ENOMEM;
 
 	spin_lock_init(&pool->last_cpu_lock);
-	pool->last_cpu = any_online_cpu(cpu_online_map);
+	pool->last_cpu = cpumask_any(cpu_online_mask);
 
 	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
 	if (pool->cpu_comp_tasks == NULL) {
diff -r 4f815aecbe2d drivers/infiniband/hw/ipath/ipath_file_ops.c
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c	Tue Nov 18 23:22:46 2008 +1030
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c	Tue Nov 18 23:25:18 2008 +1030
@@ -1674,7 +1674,7 @@
 	 * InfiniPath chip to that processor (we assume reasonable connectivity,
 	 * for now).  This code assumes that if affinity has been set
 	 * before this point, that at most one cpu is set; for now this
-	 * is reasonable.  I check for both cpus_empty() and cpus_full(),
+	 * is reasonable.  I check for both cpumask_empty() and cpumask_full(),
 	 * in case some kernel variant sets none of the bits when no
 	 * affinity is set.  2.6.11 and 12 kernels have all present
 	 * cpus set.  Some day we'll have to fix it up further to handle
@@ -1683,11 +1683,11 @@
 	 * information.  There may be some issues with dual core numbering
 	 * as well.  This needs more work prior to release.
 	 */
-	if (!cpus_empty(current->cpus_allowed) &&
-	    !cpus_full(current->cpus_allowed)) {
+	if (!cpumask_empty(&current->cpus_allowed) &&
+	    !cpumask_full(&current->cpus_allowed)) {
 		int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
 		for (i = 0; i < ncpus; i++)
-			if (cpu_isset(i, current->cpus_allowed)) {
+			if (cpumask_test_cpu(i, &current->cpus_allowed)) {
 				ipath_cdbg(PROC, "%s[%u] affinity set for "
 					   "cpu %d/%d\n", current->comm,
 					   current->pid, i, ncpus);
diff -r 4f815aecbe2d drivers/misc/sgi-xp/xpc_main.c
--- a/drivers/misc/sgi-xp/xpc_main.c	Tue Nov 18 23:22:46 2008 +1030
+++ b/drivers/misc/sgi-xp/xpc_main.c	Tue Nov 18 23:25:18 2008 +1030
@@ -318,7 +318,7 @@
 
 	/* this thread was marked active by xpc_hb_init() */
 
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
+	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));
 
 	/* set our heartbeating to other partitions into motion */
 	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
diff -r 4f815aecbe2d drivers/net/sfc/efx.c
--- a/drivers/net/sfc/efx.c	Tue Nov 18 23:22:46 2008 +1030
+++ b/drivers/net/sfc/efx.c	Tue Nov 18 23:25:18 2008 +1030
@@ -809,18 +809,17 @@
 /* Get number of RX queues wanted.  Return number of online CPU
  * packages in the expectation that an IRQ balancer will spread
  * interrupts across them. */
-static int efx_wanted_rx_queues(void)
+static int efx_wanted_rx_queues(struct cpumask *scratch)
 {
-	cpumask_t core_mask;
 	int count;
 	int cpu;
 
-	cpus_clear(core_mask);
+	cpumask_clear(scratch);
 	count = 0;
 	for_each_online_cpu(cpu) {
-		if (!cpu_isset(cpu, core_mask)) {
+		if (!cpumask_test_cpu(cpu, scratch)) {
 			++count;
-			cpus_or(core_mask, core_mask,
-				topology_core_siblings(cpu));
+			cpumask_or(scratch, scratch,
+				   topology_core_cpumask(cpu));
 		}
 	}
@@ -831,7 +830,7 @@
 /* Probe the number and type of interrupts we are able to obtain, and
  * the resulting numbers of channels and RX queues.
  */
-static void efx_probe_interrupts(struct efx_nic *efx)
+static void efx_probe_interrupts(struct efx_nic *efx, struct cpumask *scratch)
 {
 	int max_channels =
 		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -845,7 +844,8 @@
 		 * (or as specified by the rss_cpus module parameter).
 		 * We will need one channel per interrupt.
 		 */
-		wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
+		wanted_ints = rss_cpus ? rss_cpus :
+					 efx_wanted_rx_queues(scratch);
 		efx->n_rx_queues = min(wanted_ints, max_channels);
 
 		for (i = 0; i < efx->n_rx_queues; i++)
@@ -923,24 +923,29 @@
 static int efx_probe_nic(struct efx_nic *efx)
 {
 	int rc;
+	cpumask_var_t scratch;
 
 	EFX_LOG(efx, "creating NIC\n");
+
+	if (!alloc_cpumask_var(&scratch, GFP_KERNEL))
+		return -ENOMEM;
 
 	/* Carry out hardware-type specific initialisation */
 	rc = falcon_probe_nic(efx);
 	if (rc)
-		return rc;
+		goto out;
 
 	/* Determine the number of channels and RX queues by trying to hook
 	 * in MSI-X interrupts. */
-	efx_probe_interrupts(efx);
+	efx_probe_interrupts(efx, scratch);
 
 	efx_set_channels(efx);
 
 	/* Initialise the interrupt moderation settings */
 	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
-
-	return 0;
+out:
+	free_cpumask_var(scratch);
+	return rc;
 }
 
 static void efx_remove_nic(struct efx_nic *efx)
diff -r 4f815aecbe2d drivers/oprofile/buffer_sync.c
--- a/drivers/oprofile/buffer_sync.c	Tue Nov 18 23:22:46 2008 +1030
+++ b/drivers/oprofile/buffer_sync.c	Tue Nov 18 23:25:18 2008 +1030
@@ -37,7 +37,7 @@
 
 static LIST_HEAD(dying_tasks);
 static LIST_HEAD(dead_tasks);
-static cpumask_t marked_cpus = CPU_MASK_NONE;
+static cpumask_var_t marked_cpus;
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
@@ -524,10 +524,10 @@
 {
 	int i;
 
-	cpu_set(cpu, marked_cpus);
+	cpumask_set_cpu(cpu, marked_cpus);
 
 	for_each_online_cpu(i) {
-		if (!cpu_isset(i, marked_cpus))
+		if (!cpumask_test_cpu(i, marked_cpus))
 			return;
 	}
 
@@ -536,7 +536,7 @@
 	 */
 	process_task_mortuary();
 
-	cpus_clear(marked_cpus);
+	cpumask_clear(marked_cpus);
 }
 
 
@@ -632,6 +632,20 @@
 	mutex_unlock(&buffer_mutex);
 }
 
+int __init buffer_sync_init(void)
+{
+	if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_copy(marked_cpus, cpu_none_mask);
+	return 0;
+}
+
+void __exit buffer_sync_cleanup(void)
+{
+	free_cpumask_var(marked_cpus);
+}
+
 /* The function can be used to add a buffer worth of data directly to
  * the kernel buffer. The buffer is assumed to be a circular buffer.
  * Take the entries from index start and end at index end, wrapping
diff -r 4f815aecbe2d drivers/oprofile/buffer_sync.h
--- a/drivers/oprofile/buffer_sync.h	Tue Nov 18 23:22:46 2008 +1030
+++ b/drivers/oprofile/buffer_sync.h	Tue Nov 18 23:25:18 2008 +1030
@@ -19,4 +19,8 @@
 /* sync the given CPU's buffer */
 void sync_buffer(int cpu);
 
+/* initialize/destroy the buffer system. */
+int buffer_sync_init(void);
+void buffer_sync_cleanup(void);
+
 #endif /* OPROFILE_BUFFER_SYNC_H */
diff -r 4f815aecbe2d drivers/oprofile/oprof.c
--- a/drivers/oprofile/oprof.c	Tue Nov 18 23:22:46 2008 +1030
+++ b/drivers/oprofile/oprof.c	Tue Nov 18 23:25:18 2008 +1030
@@ -183,6 +183,10 @@
 {
 	int err;
 
+	err = buffer_sync_init();
+	if (err)
+		return err;
+
 	err = oprofile_arch_init(&oprofile_ops);
 
 	if (err < 0 || timer) {
@@ -191,8 +195,10 @@
 	}
 
 	err = oprofilefs_register();
-	if (err)
+	if (err) {
 		oprofile_arch_exit();
+		buffer_sync_cleanup();
+	}
 
 	return err;
 }
@@ -202,6 +208,7 @@
 {
 	oprofilefs_unregister();
 	oprofile_arch_exit();
+	buffer_sync_cleanup();
 }
 
 
diff -r 4f815aecbe2d drivers/xen/manage.c
--- a/drivers/xen/manage.c	Tue Nov 18 23:22:46 2008 +1030
+++ b/drivers/xen/manage.c	Tue Nov 18 23:25:18 2008 +1030
@@ -100,7 +100,7 @@
 	/* XXX use normal device tree? */
 	xenbus_suspend();
 
-	err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0));
+	err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
 	if (err) {
 		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
 		goto out;
diff -r 4f815aecbe2d net/iucv/iucv.c
--- a/net/iucv/iucv.c	Tue Nov 18 23:22:46 2008 +1030
+++ b/net/iucv/iucv.c	Tue Nov 18 23:25:18 2008 +1030
@@ -489,15 +489,14 @@
  *
  * Allow iucv interrupts on a single cpu.
  */
-static void iucv_setmask_up(void)
+static void iucv_setmask_up(struct cpumask *cpumask)
 {
-	cpumask_t cpumask;
 	int cpu;
 
 	/* Disable all cpu but the first in cpu_irq_cpumask. */
-	cpumask = iucv_irq_cpumask;
-	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
-	for_each_cpu_mask_nr(cpu, cpumask)
+	cpumask_copy(cpumask, &iucv_irq_cpumask);
+	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), cpumask);
+	for_each_cpu(cpu, cpumask)
 		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
@@ -555,7 +554,7 @@
 static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 				     unsigned long action, void *hcpu)
 {
-	cpumask_t cpumask;
+	cpumask_var_t cpumask;
 	long cpu = (long) hcpu;
 
 	switch (action) {
@@ -590,15 +589,20 @@
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
-		cpumask = iucv_buffer_cpumask;
-		cpu_clear(cpu, cpumask);
-		if (cpus_empty(cpumask))
+		if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+			return NOTIFY_BAD;
+		cpumask_copy(cpumask, &iucv_buffer_cpumask);
+		cpumask_clear_cpu(cpu, cpumask);
+		if (cpumask_empty(cpumask)) {
 			/* Can't offline last IUCV enabled cpu. */
+			free_cpumask_var(cpumask);
 			return NOTIFY_BAD;
+		}
 		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
 		if (cpus_empty(iucv_irq_cpumask))
 			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
 						 iucv_allow_cpu, NULL, 1);
+		free_cpumask_var(cpumask);
 		break;
 	}
 	return NOTIFY_OK;
@@ -683,9 +687,12 @@
 int iucv_register(struct iucv_handler *handler, int smp)
 {
 	int rc;
+	cpumask_var_t scratch;
 
 	if (!iucv_available)
 		return -ENOSYS;
+	if (!alloc_cpumask_var(&scratch, GFP_KERNEL))
+		return -ENOMEM;
 	mutex_lock(&iucv_register_mutex);
 	if (!smp)
 		iucv_nonsmp_handler++;
@@ -694,7 +701,7 @@
 		if (rc)
 			goto out_mutex;
 	} else if (!smp && iucv_nonsmp_handler == 1)
-		iucv_setmask_up();
+		iucv_setmask_up(scratch);
 	INIT_LIST_HEAD(&handler->paths);
 
 	spin_lock_bh(&iucv_table_lock);
@@ -703,6 +710,7 @@
 	rc = 0;
 out_mutex:
 	mutex_unlock(&iucv_register_mutex);
+	free_cpumask_var(scratch);
 	return rc;
 }
 EXPORT_SYMBOL(iucv_register);
