From: Rusty Russell <rusty@rustcorp.com.au>
Subject: cpumask: get rid of cpu_isset, cpu_test_and_set, and cpus_*

They've long been deprecated in favor of the cpumask_* versions (whose
names follow the underlying bitmap functions).  We use spatch to
convert the remaining users, since there are quite a few of them.
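
The conversion is mechanical: the cpumask_* functions take a pointer,
so a mask previously passed by value grows a '&'.  Roughly (a sketch,
with 'mask', 'dst' and 'src' standing in for any cpumask_t variables):

	cpu_isset(cpu, mask)      -> cpumask_test_cpu(cpu, &mask)
	cpus_empty(mask)          -> cpumask_empty(&mask)
	cpus_and(dst, src1, src2) -> cpumask_and(&dst, &src1, &src2)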

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
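
(The semantic patch can be replayed on any stragglers with the usual
coccicheck target, something like:

	make coccicheck MODE=patch COCCI=scripts/coccinelle/misc/cpumask.cocci)
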
 arch/ia64/include/asm/acpi.h          |    4 
 arch/ia64/kernel/iosapic.c            |    2 
 arch/ia64/kernel/irq_ia64.c           |   24 ++---
 arch/ia64/kernel/mca.c                |    4 
 arch/ia64/kernel/numa.c               |    6 -
 arch/ia64/kernel/salinfo.c            |    8 -
 arch/ia64/kernel/setup.c              |    6 -
 arch/ia64/kernel/smpboot.c            |    8 -
 arch/mips/kernel/irq-gic.c            |    2 
 arch/mips/kernel/mips-mt-fpaff.c      |    4 
 arch/mips/kernel/process.c            |    3 
 arch/mips/kernel/smp.c                |    2 
 arch/mips/kernel/smtc.c               |    2 
 arch/mips/mti-malta/malta-smtc.c      |    2 
 arch/mips/netlogic/common/smp.c       |    2 
 arch/mips/sgi-ip27/ip27-klnuma.c      |    6 -
 arch/mips/sgi-ip27/ip27-memory.c      |    2 
 arch/parisc/kernel/irq.c              |    2 
 arch/tile/kernel/setup.c              |    2 
 arch/um/kernel/smp.c                  |    8 -
 arch/x86/platform/uv/tlb_uv.c         |    4 
 drivers/crypto/n2_core.c              |    2 
 drivers/staging/octeon/ethernet-rx.c  |    2 
 scripts/coccinelle/misc/cpumask.cocci |  135 ++++++++++++++++++++++++++++++++
 24 files changed, 189 insertions(+), 53 deletions(-)

diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -176,13 +176,13 @@ static inline void per_cpu_scan_finalize
 	int cpu;
 	int next_nid = 0;
 
-	low_cpu = cpus_weight(early_cpu_possible_map);
+	low_cpu = cpumask_weight(&early_cpu_possible_map);
 
 	high_cpu = max(low_cpu, min_cpus);
 	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
 
 	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
-		cpu_set(cpu, early_cpu_possible_map);
+		cpumask_set_cpu(cpu, &early_cpu_possible_map);
 		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
 			node_cpuid[cpu].nid = next_nid;
 			next_nid++;
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -690,7 +690,7 @@ skip_numa_setup:
 	do {
 		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
-	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
+	} while (!cpu_online(cpu) || !cpumask_test_cpu(cpu, &domain));
 
 	return cpu_physical_id(cpu);
 #else  /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -118,13 +118,13 @@ static inline int find_unassigned_vector
 	int pos, vector;
 
 	cpumask_and(&mask, &domain, cpu_online_mask);
-	if (cpus_empty(mask))
+	if (cpumask_empty(&mask))
 		return -EINVAL;
 
 	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
 		vector = IA64_FIRST_DEVICE_VECTOR + pos;
-		cpus_and(mask, domain, vector_table[vector]);
-		if (!cpus_empty(mask))
+		cpumask_and(&mask, &domain, &vector_table[vector]);
+		if (!cpumask_empty(&mask))
 			continue;
 		return vector;
 	}
@@ -141,9 +141,9 @@ static int __bind_irq_vector(int irq, in
 	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
 
 	cpumask_and(&mask, &domain, cpu_online_mask);
-	if (cpus_empty(mask))
+	if (cpumask_empty(&mask))
 		return -EINVAL;
-	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
+	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
 		return 0;
 	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
 		return -EBUSY;
@@ -152,7 +152,7 @@ static int __bind_irq_vector(int irq, in
 	cfg->vector = vector;
 	cfg->domain = domain;
 	irq_status[irq] = IRQ_USED;
-	cpus_or(vector_table[vector], vector_table[vector], domain);
+	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
 	return 0;
 }
 
@@ -184,7 +184,7 @@ static void __clear_irq_vector(int irq)
 	cfg->vector = IRQ_VECTOR_UNASSIGNED;
 	cfg->domain = CPU_MASK_NONE;
 	irq_status[irq] = IRQ_UNUSED;
-	cpus_andnot(vector_table[vector], vector_table[vector], domain);
+	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
 }
 
 static void clear_irq_vector(int irq)
@@ -253,7 +253,7 @@ void __setup_vector_irq(int cpu)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 	/* Mark the inuse vectors */
 	for (irq = 0; irq < NR_IRQS; ++irq) {
-		if (!cpu_isset(cpu, irq_cfg[irq].domain))
+		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
 			continue;
 		vector = irq_to_vector(irq);
 		per_cpu(vector_irq, cpu)[vector] = irq;
@@ -284,7 +284,7 @@ static int __irq_prepare_move(int irq, i
 		return -EBUSY;
 	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
 		return -EINVAL;
-	if (cpu_isset(cpu, cfg->domain))
+	if (cpumask_test_cpu(cpu, &cfg->domain))
 		return 0;
 	domain = vector_allocation_domain(cpu);
 	vector = find_unassigned_vector(domain);
@@ -318,11 +318,11 @@ void irq_complete_move(unsigned irq)
 	if (likely(!cfg->move_in_progress))
 		return;
 
-	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
 		return;
 
 	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
-	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
 	for_each_cpu(i, &cleanup_mask)
 		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
 	cfg->move_in_progress = 0;
@@ -349,7 +349,7 @@ static irqreturn_t smp_irq_move_cleanup_
 		if (!cfg->move_cleanup_count)
 			goto unlock;
 
-		if (!cpu_isset(me, cfg->old_domain))
+		if (!cpumask_test_cpu(me, &cfg->old_domain))
 			goto unlock;
 
 		spin_lock_irqsave(&vector_lock, flags);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1316,7 +1316,7 @@ ia64_mca_handler(struct pt_regs *regs, s
 		 */
 		ia64_mca_wakeup_all();
 	} else {
-		while (cpu_isset(cpu, mca_cpu))
+		while (cpumask_test_cpu(cpu, &mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
 	}
 
@@ -1355,7 +1355,7 @@ ia64_mca_handler(struct pt_regs *regs, s
 		 * and put this cpu in the rendez loop.
 		 */
 		for_each_online_cpu(i) {
-			if (cpu_isset(i, mca_cpu)) {
+			if (cpumask_test_cpu(i, &mca_cpu)) {
 				monarch_cpu = i;
 				cpumask_clear_cpu(i, &mca_cpu);	/* wake next cpu */
 				while (monarch_cpu != -1)
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -39,7 +39,7 @@ void __cpuinit map_cpu_to_node(int cpu, 
 	}
 	/* sanity check first */
 	oldnid = cpu_to_node_map[cpu];
-	if (cpu_isset(cpu, node_to_cpu_mask[oldnid])) {
+	if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) {
 		return; /* nothing to do */
 	}
 	/* we don't have cpu-driven node hot add yet...
@@ -53,7 +53,7 @@ void __cpuinit map_cpu_to_node(int cpu, 
 
 void __cpuinit unmap_cpu_from_node(int cpu, int nid)
 {
-	WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
+	WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
 	WARN_ON(cpu_to_node_map[cpu] != nid);
 	cpu_to_node_map[cpu] = 0;
 	cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
@@ -71,7 +71,7 @@ void __init build_cpu_to_node_map(void)
 	int cpu, i, node;
 
 	for(node=0; node < MAX_NUMNODES; node++)
-		cpus_clear(node_to_cpu_mask[node]);
+		cpumask_clear(&node_to_cpu_mask[node]);
 
 	for_each_possible_early_cpu(cpu) {
 		node = -1;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -273,7 +273,7 @@ salinfo_timeout_check(struct salinfo_dat
 	unsigned long flags;
 	if (!data->open)
 		return;
-	if (!cpus_empty(data->cpu_event)) {
+	if (!cpumask_empty(&data->cpu_event)) {
 		spin_lock_irqsave(&data_saved_lock, flags);
 		salinfo_work_to_do(data);
 		spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -309,7 +309,7 @@ salinfo_event_read(struct file *file, ch
 	int i, n, cpu = -1;
 
 retry:
-	if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
+	if (cpumask_empty(&data->cpu_event) && down_trylock(&data->mutex)) {
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 		if (down_interruptible(&data->mutex))
@@ -318,7 +318,7 @@ retry:
 
 	n = data->cpu_check;
 	for (i = 0; i < nr_cpu_ids; i++) {
-		if (cpu_isset(n, data->cpu_event)) {
+		if (cpumask_test_cpu(n, &data->cpu_event)) {
 			if (!cpu_online(n)) {
 				cpumask_clear_cpu(n, &data->cpu_event);
 				continue;
@@ -496,7 +496,7 @@ salinfo_log_clear(struct salinfo_data *d
 	unsigned long flags;
 	spin_lock_irqsave(&data_saved_lock, flags);
 	data->state = STATE_NO_DATA;
-	if (!cpu_isset(cpu, data->cpu_event)) {
+	if (!cpumask_test_cpu(cpu, &data->cpu_event)) {
 		spin_unlock_irqrestore(&data_saved_lock, flags);
 		return 0;
 	}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -562,8 +562,8 @@ setup_arch (char **cmdline_p)
 #  ifdef CONFIG_ACPI_HOTPLUG_CPU
 	prefill_possible_map();
 #  endif
-	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
-		32 : cpus_weight(early_cpu_possible_map)),
+	per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
+		32 : cpumask_weight(&early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
 # endif
 #endif /* CONFIG_APCI_BOOT */
@@ -702,7 +702,7 @@ show_cpuinfo (struct seq_file *m, void *
 		   c->itc_freq / 1000000, c->itc_freq % 1000000,
 		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
 #ifdef CONFIG_SMP
-	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
+	seq_printf(m, "siblings   : %u\n", cpumask_weight(&cpu_core_map[cpunum]));
 	if (c->socket_id != -1)
 		seq_printf(m, "physical id: %u\n", c->socket_id);
 	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -538,13 +538,13 @@ do_rest:
 	 */
 	Dprintk("Waiting on callin_map ...");
 	for (timeout = 0; timeout < 100000; timeout++) {
-		if (cpu_isset(cpu, cpu_callin_map))
+		if (cpumask_test_cpu(cpu, &cpu_callin_map))
 			break;  /* It has booted */
 		udelay(100);
 	}
 	Dprintk("\n");
 
-	if (!cpu_isset(cpu, cpu_callin_map)) {
+	if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
 		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
 		ia64_cpu_to_sapicid[cpu] = -1;
 		set_cpu_online(cpu, false);  /* was set in smp_callin() */
@@ -660,7 +660,7 @@ remove_siblinginfo(int cpu)
 		return;
 	}
 
-	last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
+	last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
 
 	/* remove it from all sibling map's */
 	clear_cpu_sibling_map(cpu);
@@ -808,7 +808,7 @@ int __cpuinit
 	 * Already booted cpu? not valid anymore since we dont
 	 * do idle loop tightspin anymore.
 	 */
-	if (cpu_isset(cpu, cpu_callin_map))
+	if (cpumask_test_cpu(cpu, &cpu_callin_map))
 		return -EINVAL;
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -126,7 +126,7 @@ static int gic_set_affinity(struct irq_d
 
 	pr_debug("%s(%d) called\n", __func__, irq);
 	cpumask_and(&tmp, cpumask, cpu_online_mask);
-	if (cpus_empty(tmp))
+	if (cpumask_empty(&tmp))
 		return -1;
 
 	/* Assumption : cpumask refers to a single CPU */
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -114,8 +114,8 @@ asmlinkage long mipsmt_sys_sched_setaffi
 	/* Compute new global allowed CPU set if necessary */
 	ti = task_thread_info(p);
 	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
-	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
-		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
+	    cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
+		cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
 		retval = set_cpus_allowed_ptr(p, effective_mask);
 	} else {
 		cpumask_copy(effective_mask, new_mask);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -72,7 +72,8 @@ void __noreturn cpu_idle(void)
 			}
 		}
 #ifdef CONFIG_HOTPLUG_CPU
-		if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
+		if (!cpu_online(cpu) && !cpumask_test_cpu(cpu,
+							  &cpu_callin_map) &&
 		    (system_state == SYSTEM_RUNNING ||
 		     system_state == SYSTEM_BOOTING))
 			play_dead();
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -245,7 +245,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	/*
 	 * Trust is futile.  We should really have timeouts ...
 	 */
-	while (!cpu_isset(cpu, cpu_callin_map))
+	while (!cpumask_test_cpu(cpu, &cpu_callin_map))
 		udelay(100);
 
 	set_cpu_online(cpu, true);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -311,7 +311,7 @@ int __init smtc_build_cpu_map(int start_
 	}
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* Initialize map of CPUs with FPUs */
-	cpus_clear(mt_fpu_cpumask);
+	cpumask_clear(&mt_fpu_cpumask);
 #endif
 
 	/* One of those TC's is the one booting, and not a secondary... */
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -146,7 +146,7 @@ int plat_set_irq_affinity(struct irq_dat
 	}
 	cpumask_copy(d->affinity, &tmask);
 
-	if (cpus_empty(tmask))
+	if (cpumask_empty(&tmask))
 		/*
 		 * We could restore a default mask here, but the
 		 * runtime code can anyway deal with the null set
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -160,7 +160,7 @@ void __init nlm_smp_setup(void)
 	int num_cpus, i;
 
 	boot_cpu = hard_smp_processor_id();
-	cpus_clear(phys_cpu_present_map);
+	cpumask_clear(&phys_cpu_present_map);
 
 	cpumask_set_cpu(boot_cpu, &phys_cpu_present_map);
 	__cpu_number_map[boot_cpu] = 0;
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -29,7 +29,7 @@ static cpumask_t ktext_repmask;
 void __init setup_replication_mask(void)
 {
 	/* Set only the master cnode's bit.  The master cnode is always 0. */
-	cpus_clear(ktext_repmask);
+	cpumask_clear(&ktext_repmask);
 	cpumask_set_cpu(0, &ktext_repmask);
 
 #ifdef CONFIG_REPLICATE_KTEXT
@@ -99,7 +99,7 @@ void __init replicate_kernel_text()
 		client_nasid = COMPACT_TO_NASID_NODEID(cnode);
 
 		/* Check if this node should get a copy of the kernel */
-		if (cpu_isset(cnode, ktext_repmask)) {
+		if (cpumask_test_cpu(cnode, &ktext_repmask)) {
 			server_nasid = client_nasid;
 			copy_kernel(server_nasid);
 		}
@@ -124,7 +124,7 @@ pfn_t node_getfirstfree(cnodeid_t cnode)
 	loadbase += 16777216;
 #endif
 	offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
-	if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask)))
+	if ((cnode == 0) || (cpumask_test_cpu(cnode, &ktext_repmask)))
 		return (TO_NODE(nasid, offset) >> PAGE_SHIFT);
 	else
 		return (KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >>
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -406,7 +406,7 @@ static void __init node_mem_init(cnodeid
 	NODE_DATA(node)->node_start_pfn = start_pfn;
 	NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
 
-	cpus_clear(hub_data(node)->h_cpus);
+	cpumask_clear(&hub_data(node)->h_cpus);
 
 	slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
 			       sizeof(struct hub_data));
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -354,7 +354,7 @@ void do_cpu_irq_mask(struct pt_regs *reg
 	desc = irq_to_desc(irq);
 	cpumask_copy(&dest, desc->irq_data.affinity);
 	if (irqd_is_per_cpu(&desc->irq_data) &&
-	    !cpu_isset(smp_processor_id(), dest)) {
+	    !cpumask_test_cpu(smp_processor_id(), &dest)) {
 		int cpu = cpumask_first(&dest);
 
 		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -634,7 +634,7 @@ static void __init zone_sizes_init(void)
 			if (node_percpu[i])
 				node_percpu_pfn[i] =
 				    alloc_bootmem_pfn(node_percpu[i], goal);
-		} else if (cpu_isset(i, isolnodes)) {
+		} else if (cpumask_test_cpu(i, &isolnodes)) {
 			node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
 			BUG_ON(node_percpu[i] != 0);
 		} else {
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -67,12 +67,12 @@ static int idle_proc(void *cpup)
 	os_set_fd_async(cpu_data[cpu].ipi_pipe[0]);
 
 	wmb();
-	if (cpu_test_and_set(cpu, cpu_callin_map)) {
+	if (cpumask_test_and_set_cpu(cpu, &cpu_callin_map)) {
 		printk(KERN_ERR "huh, CPU#%d already present??\n", cpu);
 		BUG();
 	}
 
-	while (!cpu_isset(cpu, smp_commenced_mask))
+	while (!cpumask_test_cpu(cpu, &smp_commenced_mask))
 		cpu_relax();
 
 	notify_cpu_starting(cpu);
@@ -127,11 +127,11 @@ void smp_prepare_cpus(unsigned int maxcp
 		init_idle(idle, cpu);
 
 		waittime = 200000000;
-		while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
+		while (waittime-- && !cpumask_test_cpu(cpu, &cpu_callin_map))
 			cpu_relax();
 
 		printk(KERN_INFO "%s\n",
-		       cpu_isset(cpu, cpu_callin_map) ? "done" : "failed");
+		       cpumask_test_cpu(cpu, &cpu_callin_map) ? "done" : "failed");
 	}
 }
 
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -380,7 +380,7 @@ static void reset_with_ipi(struct pnmask
 	struct reset_args reset_args;
 
 	reset_args.sender = sender;
-	cpus_clear(*mask);
+	cpumask_clear(mask);
 	/* find a single cpu for each uvhub in this distribution mask */
 	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
 	/* each bit is a pnode relative to the partition base pnode */
@@ -1101,7 +1101,7 @@ const struct cpumask *uv_flush_tlb_other
 	/* don't actually do a shootdown of the local cpu */
 	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
-	if (cpu_isset(cpu, *cpumask))
+	if (cpumask_test_cpu(cpu, cpumask))
 		stat->s_ntargself++;
 
 	bau_desc = bcp->descriptor_base;
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1780,7 +1780,7 @@ static int handle_exec_unit(struct spu_m
 		return -ENOMEM;
 	}
 
-	cpus_clear(p->sharing);
+	cpumask_clear(&p->sharing);
 	spin_lock_init(&p->lock);
 	p->q_type = q_type;
 	INIT_LIST_HEAD(&p->jobs);
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -98,7 +98,7 @@ static void cvm_oct_enable_one_cpu(void)
 
 	/* ... if a CPU is available, Turn on NAPI polling for that CPU.  */
 	for_each_online_cpu(cpu) {
-		if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
+		if (!cpumask_test_and_set_cpu(cpu, &core_state.cpu_state)) {
 			v = smp_call_function_single(cpu, cvm_oct_enable_napi,
 						     NULL, 0);
 			if (v)
diff --git a/scripts/coccinelle/misc/cpumask.cocci b/scripts/coccinelle/misc/cpumask.cocci
--- a/scripts/coccinelle/misc/cpumask.cocci
+++ b/scripts/coccinelle/misc/cpumask.cocci
@@ -32,3 +32,138 @@ expression M;
 
 -cpu_clear(E, M);
 +cpumask_clear_cpu(E, &M);
+
+@@
+expression M;
+@@
+
+-cpus_setall(*M);
++cpumask_setall(M);
+
+@@
+expression M;
+@@
+
+-cpus_setall(M);
++cpumask_setall(&M);
+
+@@
+expression M;
+@@
+
+-cpus_clear(*M);
++cpumask_clear(M);
+
+@@
+expression M;
+@@
+
+-cpus_clear(M);
++cpumask_clear(&M);
+
+@@
+expression E;
+expression M;
+@@
+
+-cpu_isset(E, *M)
++cpumask_test_cpu(E, M)
+
+@@
+expression E;
+expression M;
+@@
+
+-cpu_isset(E, M)
++cpumask_test_cpu(E, &M)
+
+@@
+expression E;
+expression M;
+@@
+
+-cpu_test_and_set(E, *M)
++cpumask_test_and_set_cpu(E, M)
+
+@@
+expression E;
+expression M;
+@@
+
+-cpu_test_and_set(E, M)
++cpumask_test_and_set_cpu(E, &M)
+
+@@
+expression M1;
+expression M2;
+expression M3;
+@@
+
+-cpus_and(M1, M2, M3)
++cpumask_and(&M1, &M2, &M3)
+
+@@
+expression M1;
+expression M2;
+expression M3;
+@@
+
+-cpus_or(M1, M2, M3)
++cpumask_or(&M1, &M2, &M3)
+
+@@
+expression M1;
+expression M2;
+expression M3;
+@@
+
+-cpus_xor(M1, M2, M3)
++cpumask_xor(&M1, &M2, &M3)
+
+@@
+expression M1;
+expression M2;
+expression M3;
+@@
+
+-cpus_andnot(M1, M2, M3)
++cpumask_andnot(&M1, &M2, &M3)
+
+@@
+expression M1;
+expression M2;
+@@
+
+-cpus_equal(M1, M2)
++cpumask_equal(&M1, &M2)
+
+@@
+expression M1;
+expression M2;
+@@
+
+-cpus_intersects(M1, M2)
++cpumask_intersects(&M1, &M2)
+
+@@
+expression M1;
+expression M2;
+@@
+
+-cpus_subset(M1, M2)
++cpumask_subset(&M1, &M2)
+
+@@
+expression M1;
+@@
+
+-cpus_empty(M1)
++cpumask_empty(&M1)
+
+@@
+expression M1;
+@@
+
+-cpus_weight(M1)
++cpumask_weight(&M1)
+
