cpumask: remove any_online_cpu() users.

any_online_cpu() is a good name, but it takes a cpumask_t, not a
pointer, so it doesn't fit the rest of the pointer-based cpumask API.
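
For comparison, the rough shape of the old and new interfaces (a
sketch only; the exact prototypes are those of the new cpumask API
introduced earlier in this series, and cpumask_any_both()'s signature
here is inferred from its callers below):

	int any_online_cpu(cpumask_t mask);

	unsigned int cpumask_any(const struct cpumask *srcp);
	unsigned int cpumask_any_both(const struct cpumask *mask1,
				      const struct cpumask *mask2);

Each returns a CPU number, or >= nr_cpu_ids if no suitable CPU exists.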

There are several places where any_online_cpu() doesn't really want a
mask arg at all; those callers become plain cpumask_any(cpu_online_mask).
The rest want any online CPU within a given mask, and become
cpumask_any_both(cpu_online_mask, mask), as sketched below.
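
The conversion pattern, illustrated with the move_task_off_dead_cpu()
lines from the diff below (illustrative excerpt only):

	/* Before: any online CPU in the given cpumask_t. */
	dest_cpu = any_online_cpu(p->cpus_allowed);

	/* After: same result, masks passed by pointer. */
	dest_cpu = cpumask_any_both(cpu_online_mask, &p->cpus_allowed);

	/* Callers which never needed a mask arg take any online CPU. */
	cpu = cpumask_any(cpu_online_mask);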

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/x86/kernel/irq_32.c |    2 +-
 kernel/sched.c           |   14 ++++++++------
 kernel/softirq.c         |    2 +-
 kernel/softlockup.c      |    6 +++---
 mm/vmscan.c              |    2 +-
 5 files changed, 14 insertions(+), 12 deletions(-)

diff -r d1d05cfc61a6 arch/x86/kernel/irq_32.c
--- a/arch/x86/kernel/irq_32.c	Sun Sep 28 21:54:58 2008 +1000
+++ b/arch/x86/kernel/irq_32.c	Sun Sep 28 21:56:39 2008 +1000
@@ -402,7 +402,7 @@ void fixup_irqs(cpumask_t map)
 			continue;
 
 		cpus_and(mask, irq_desc[irq].affinity, map);
-		if (any_online_cpu(mask) >= nr_cpu_ids) {
+		if (cpumask_any_both(cpu_online_mask, &mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
 		}
diff -r d1d05cfc61a6 kernel/sched.c
--- a/kernel/sched.c	Sun Sep 28 21:54:58 2008 +1000
+++ b/kernel/sched.c	Sun Sep 28 21:56:39 2008 +1000
@@ -5905,7 +5905,7 @@ int set_cpus_allowed_ptr(struct task_str
 	if (cpu_isset(task_cpu(p), *new_mask))
 		goto out;
 
-	if (migrate_task(p, any_online_cpu(*new_mask), &req)) {
+	if (migrate_task(p, cpumask_any_both(cpu_online_mask, new_mask), &req)) {
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, &flags);
 		wake_up_process(rq->migration_thread);
@@ -6054,11 +6054,12 @@ static void move_task_off_dead_cpu(int d
 		/* On same node? */
 		mask = node_to_cpumask(cpu_to_node(dead_cpu));
 		cpus_and(mask, mask, p->cpus_allowed);
-		dest_cpu = any_online_cpu(mask);
+		dest_cpu = cpumask_any_both(cpu_online_mask, &mask);
 
 		/* On any allowed CPU? */
 		if (dest_cpu >= nr_cpu_ids)
-			dest_cpu = any_online_cpu(p->cpus_allowed);
+			dest_cpu = cpumask_any_both(cpu_online_mask,
+						    &p->cpus_allowed);
 
 		/* No more Mr. Nice Guy. */
 		if (dest_cpu >= nr_cpu_ids) {
@@ -6074,7 +6075,8 @@ static void move_task_off_dead_cpu(int d
 			 */
 			rq = task_rq_lock(p, &flags);
 			p->cpus_allowed = cpus_allowed;
-			dest_cpu = any_online_cpu(p->cpus_allowed);
+			dest_cpu = cpumask_any_both(cpu_online_mask,
+						    &p->cpus_allowed);
 			task_rq_unlock(rq, &flags);
 
 			/*
@@ -6100,7 +6102,7 @@ static void move_task_off_dead_cpu(int d
  */
 static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-	struct rq *rq_dest = cpu_rq(any_online_cpu(cpu_mask_all));
+	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -6463,7 +6465,7 @@ migration_call(struct notifier_block *nf
 			break;
 		/* Unbind it from offline cpu so it can run. Fall thru. */
 		kthread_bind(cpu_rq(cpu)->migration_thread,
-			     any_online_cpu(cpu_online_map));
+			     cpumask_any(cpu_online_mask));
 		kthread_stop(cpu_rq(cpu)->migration_thread);
 		cpu_rq(cpu)->migration_thread = NULL;
 		break;
diff -r d1d05cfc61a6 kernel/softirq.c
--- a/kernel/softirq.c	Sun Sep 28 21:54:58 2008 +1000
+++ b/kernel/softirq.c	Sun Sep 28 21:56:39 2008 +1000
@@ -609,7 +609,7 @@ static int __cpuinit cpu_callback(struct
 			break;
 		/* Unbind so it can run.  Fall thru. */
 		kthread_bind(per_cpu(ksoftirqd, hotcpu),
-			     any_online_cpu(cpu_online_map));
+			     cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN: {
 		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
diff -r d1d05cfc61a6 kernel/softlockup.c
--- a/kernel/softlockup.c	Sun Sep 28 21:54:58 2008 +1000
+++ b/kernel/softlockup.c	Sun Sep 28 21:56:39 2008 +1000
@@ -303,7 +303,7 @@ cpu_callback(struct notifier_block *nfb,
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		check_cpu = any_online_cpu(cpu_online_map);
+		check_cpu = cpumask_any(cpu_online_mask);
 		wake_up_process(per_cpu(watchdog_task, hotcpu));
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
@@ -313,7 +313,7 @@ cpu_callback(struct notifier_block *nfb,
 			cpumask_t temp_cpu_online_map = cpu_online_map;
 
 			cpu_clear(hotcpu, temp_cpu_online_map);
-			check_cpu = any_online_cpu(temp_cpu_online_map);
+			check_cpu = cpumask_any(&temp_cpu_online_map);
 		}
 		break;
 
@@ -323,7 +323,7 @@ cpu_callback(struct notifier_block *nfb,
 			break;
 		/* Unbind so it can run.  Fall thru. */
 		kthread_bind(per_cpu(watchdog_task, hotcpu),
-			     any_online_cpu(cpu_online_map));
+			     cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		p = per_cpu(watchdog_task, hotcpu);
diff -r d1d05cfc61a6 mm/vmscan.c
--- a/mm/vmscan.c	Sun Sep 28 21:54:58 2008 +1000
+++ b/mm/vmscan.c	Sun Sep 28 21:56:39 2008 +1000
@@ -1926,7 +1926,7 @@ static int __devinit cpu_callback(struct
 			pg_data_t *pgdat = NODE_DATA(nid);
 			node_to_cpumask_ptr(mask, pgdat->node_id);
 
-			if (any_online_cpu(*mask) < nr_cpu_ids)
+			if (cpumask_any_both(cpu_online_mask, mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
 				set_cpus_allowed_ptr(pgdat->kswapd, mask);
 		}
