From: Rusty Russell <rusty@rustcorp.com.au>
Subject: cpumask: get rid of CPU_MASK_ALL, CPU_MASK_NONE.

This was done manually, as replacements vary.

(1) Global or static variables initialized to CPU_MASK_NONE don't
    need an initializer (static storage is zero-filled anyway), so
    the initializers were simply commented out.

(2) Clearing or setting a full cpumask should use the cpumask
    operations, e.g. cpumask_clear() (a before/after sketch follows
    this list).

(3) The cpu_all_mask variable is a const struct cpumask pointer; it
    replaces CPU_MASK_ALL in several places, dereferenced where a
    cpumask_t value is required.

(4) A few places initialized a cpumask only to overwrite it before
    use, so the redundant initializer was dropped.

(5) arch/cris/arch-v32/kernel/irq.c is the only place which really wants
    a static initialization, so I used { CPU_BITS_ALL } here.
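
As a before/after sketch of patterns (1)-(3) (drawn from the hunks
below; illustrative, not exhaustive):

	-cpumask_t smp_commenced_mask = CPU_MASK_NONE;
	+cpumask_t smp_commenced_mask; /* = CPU_MASK_NONE */

	-	cfg->domain = CPU_MASK_NONE;
	+	cpumask_clear(&cfg->domain);

	-	bind_irq_vector(irq, vector, CPU_MASK_ALL);
	+	bind_irq_vector(irq, vector, *cpu_all_mask);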

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/cris/arch-v32/kernel/irq.c       |    2 
 arch/cris/arch-v32/kernel/smp.c       |    2 
 arch/ia64/kernel/acpi.c               |    2 
 arch/ia64/kernel/iosapic.c            |    6 -
 arch/ia64/kernel/irq_ia64.c           |   23 ++----
 arch/ia64/kernel/smpboot.c            |    3 
 arch/mips/kernel/irq-gic.c            |    2 
 arch/mips/sgi-ip27/ip27-memory.c      |    6 -
 arch/mn10300/kernel/smp.c             |    2 
 arch/powerpc/include/asm/cputhreads.h |    2 
 arch/powerpc/kernel/crash.c           |    2 
 arch/powerpc/xmon/xmon.c              |    2 
 arch/sparc/kernel/smp_32.c            |    2 
 arch/sparc/kernel/smp_64.c            |    5 -
 arch/um/kernel/smp.c                  |    4 -
 drivers/staging/octeon/ethernet-rx.c  |    2 
 include/linux/cpumask.h               |  129 ----------------------------------
 17 files changed, 31 insertions(+), 165 deletions(-)

diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -51,7 +51,7 @@ struct cris_irq_allocation
 };
 
 struct cris_irq_allocation irq_allocations[NR_REAL_IRQS] =
-  { [0 ... NR_REAL_IRQS - 1] = {0, CPU_MASK_ALL} };
+  { [0 ... NR_REAL_IRQS - 1] = {0, { CPU_BITS_ALL } } };
 
 static unsigned long irq_regs[NR_CPUS] =
 {
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -31,7 +31,7 @@ spinlock_t cris_atomic_locks[] = {
 };
 
 /* CPU masks */
-cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+cpumask_t phys_cpu_present_map; /* = CPU_MASK_NONE */
 EXPORT_SYMBOL(phys_cpu_present_map);
 
 /* Variables used during SMP boot */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -426,7 +426,7 @@ static u32 __devinitdata pxm_flag[PXM_FL
 #define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
 #define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
 static struct acpi_table_slit __initdata *slit_table;
-cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
+cpumask_t early_cpu_possible_map; /* = CPU_MASK_NONE */
 
 static int __init
 get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -870,7 +870,7 @@ iosapic_register_platform_intr (u32 int_
 	switch (int_type) {
 	      case ACPI_INTERRUPT_PMI:
 		irq = vector = iosapic_vector;
-		bind_irq_vector(irq, vector, CPU_MASK_ALL);
+		bind_irq_vector(irq, vector, *cpu_all_mask);
 		/*
 		 * since PMI vector is alloc'd by FW(ACPI) not by kernel,
 		 * we need to make sure the vector is available
@@ -887,7 +887,7 @@ iosapic_register_platform_intr (u32 int_
 		break;
 	      case ACPI_INTERRUPT_CPEI:
 		irq = vector = IA64_CPE_VECTOR;
-		BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
+		BUG_ON(bind_irq_vector(irq, vector, *cpu_all_mask));
 		delivery = IOSAPIC_FIXED;
 		mask = 1;
 		break;
@@ -924,7 +924,7 @@ iosapic_override_isa_irq (unsigned int i
 	unsigned char dmode;
 
 	irq = vector = isa_irq_to_vector(isa_irq);
-	BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
+	BUG_ON(bind_irq_vector(irq, vector, *cpu_all_mask));
 	dmode = choose_dmode();
 	register_intr(gsi, irq, dmode, polarity, trigger);
 
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -78,7 +78,7 @@ DEFINE_SPINLOCK(vector_lock);
 struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
 	[0 ... NR_IRQS - 1] = {
 		.vector = IRQ_VECTOR_UNASSIGNED,
-		.domain = CPU_MASK_NONE
+		/* .domain = CPU_MASK_NONE */
 	}
 };
 
@@ -86,9 +86,7 @@ DEFINE_PER_CPU(int[IA64_NUM_VECTORS], ve
 	[0 ... IA64_NUM_VECTORS - 1] = -1
 };
 
-static cpumask_t vector_table[IA64_NUM_VECTORS] = {
-	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
-};
+static cpumask_t vector_table[IA64_NUM_VECTORS]; /* = CPU_MASK_NONE */
 
 static int irq_status[NR_IRQS] = {
 	[0 ... NR_IRQS -1] = IRQ_UNUSED
@@ -182,7 +180,7 @@ static void __clear_irq_vector(int irq)
 	for_each_cpu(cpu, &mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 	cfg->vector = IRQ_VECTOR_UNASSIGNED;
-	cfg->domain = CPU_MASK_NONE;
+	cpumask_clear(&cfg->domain);
 	irq_status[irq] = IRQ_UNUSED;
 	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
 }
@@ -201,8 +199,9 @@ ia64_native_assign_irq_vector (int irq)
 {
 	unsigned long flags;
 	int vector, cpu;
-	cpumask_t domain = CPU_MASK_NONE;
+	cpumask_t domain;
 
+	cpumask_clear(&domain);
 	vector = -ENOSPC;
 
 	spin_lock_irqsave(&vector_lock, flags);
@@ -237,7 +236,7 @@ reserve_irq_vector (int vector)
 	if (vector < IA64_FIRST_DEVICE_VECTOR ||
 	    vector > IA64_LAST_DEVICE_VECTOR)
 		return -EINVAL;
-	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
+	return !!bind_irq_vector(vector, vector, *cpu_all_mask);
 }
 
 /*
@@ -271,7 +270,7 @@ static cpumask_t vector_allocation_domai
 {
 	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
 		return *cpumask_of(cpu);
-	return CPU_MASK_ALL;
+	return *cpu_all_mask;
 }
 
 static int __irq_prepare_move(int irq, int cpu)
@@ -293,7 +292,7 @@ static int __irq_prepare_move(int irq, i
 	cfg->move_in_progress = 1;
 	cfg->old_domain = cfg->domain;
 	cfg->vector = IRQ_VECTOR_UNASSIGNED;
-	cfg->domain = CPU_MASK_NONE;
+	cpumask_clear(&cfg->domain);
 	BUG_ON(__bind_irq_vector(irq, vector, domain));
 	return 0;
 }
@@ -383,7 +382,7 @@ early_param("vector", parse_vector_domai
 #else
 static cpumask_t vector_allocation_domain(int cpu)
 {
-	return CPU_MASK_ALL;
+	return *cpu_all_mask;
 }
 #endif
 
@@ -407,7 +406,7 @@ int create_irq(void)
 {
 	unsigned long flags;
 	int irq, vector, cpu;
-	cpumask_t domain = CPU_MASK_NONE;
+	cpumask_t domain;
 
 	irq = vector = -ENOSPC;
 	spin_lock_irqsave(&vector_lock, flags);
@@ -630,7 +629,7 @@ ia64_native_register_percpu_irq (ia64_ve
 	unsigned int irq;
 
 	irq = vec;
-	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
+	BUG_ON(bind_irq_vector(irq, vec, *cpu_all_mask));
 	irq_set_status_flags(irq, IRQ_PER_CPU);
 	irq_set_chip(irq, &irq_type_ia64_lsapic);
 	if (action)
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -645,7 +645,8 @@ clear_cpu_sibling_map(int cpu)
 	for_each_cpu(i, &cpu_core_map[cpu])
 		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
 
-	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
+	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
+	cpumask_clear(&cpu_core_map[cpu]);
 }
 
 static void
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -120,7 +120,7 @@ static int gic_set_affinity(struct irq_d
 			    bool force)
 {
 	unsigned int irq = d->irq - _irqbase;
-	cpumask_t	tmp = CPU_MASK_NONE;
+	cpumask_t	tmp;
 	unsigned long	flags;
 	int		i;
 
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -424,11 +424,7 @@ static void __init node_mem_init(cnodeid
  * A node with nothing.  We use it to avoid any special casing in
  * cpumask_of_node
  */
-static struct node_data null_node = {
-	.hub = {
-		.h_cpus = CPU_MASK_NONE
-	}
-};
+static struct node_data null_node; /* .hub.h_cpus = CPU_MASK_NONE */
 
 /*
  * Currently, the intranode memory hole support assumes that each slot
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -92,7 +92,7 @@ struct mn10300_cpuinfo cpu_data[NR_CPUS]
 
 static int cpucount;			/* The count of boot CPUs */
 static cpumask_t smp_commenced_mask;
-cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+cpumask_t cpu_initialized __initdata; /* = CPU_MASK_NONE */
 
 /*
  * Function Prototypes
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -23,7 +23,7 @@ extern cpumask_t threads_core_mask;
 #else
 #define threads_per_core	1
 #define threads_shift		0
-#define threads_core_mask	(CPU_MASK_CPU0)
+#define threads_core_mask	(*cpumask_of(0))
 #endif
 
 /* cpu_thread_mask_to_cores - Return a cpumask of one per cores
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -68,7 +68,7 @@ static int handle_fault(struct pt_regs *
 static atomic_t cpus_in_crash;
 void crash_ipi_callback(struct pt_regs *regs)
 {
-	static cpumask_t cpus_state_saved = CPU_MASK_NONE;
+	static cpumask_t cpus_state_saved; /* = CPU_MASK_NONE */
 
 	int cpu = smp_processor_id();
 
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -55,7 +55,7 @@
 #define skipbl	xmon_skipbl
 
 #ifdef CONFIG_SMP
-static cpumask_t cpus_in_xmon = CPU_MASK_NONE;
+static cpumask_t cpus_in_xmon; /* = CPU_MASK_NONE */
 static unsigned long xmon_taken = 1;
 static int xmon_owner;
 static int xmon_gate;
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -38,7 +38,7 @@
 
 volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
 
-cpumask_t smp_commenced_mask = CPU_MASK_NONE;
+cpumask_t smp_commenced_mask; /* = CPU_MASK_NONE */
 
 /* The only guaranteed locking primitive available on all Sparc
  * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -55,9 +55,8 @@
 
 int sparc64_multi_core __read_mostly;
 
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
-	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); /* = CPU_MASK_NONE */
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly; /* = CPU_MASK_NONE */
 
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -53,8 +53,8 @@ void smp_send_stop(void)
 	printk(KERN_CONT "done\n");
 }
 
-static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
-static cpumask_t cpu_callin_map = CPU_MASK_NONE;
+static cpumask_t smp_commenced_mask; /* = CPU_MASK_NONE */
+static cpumask_t cpu_callin_map; /* = CPU_MASK_NONE */
 
 static int idle_proc(void *cpup)
 {
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -523,7 +523,7 @@ void cvm_oct_rx_initialize(void)
 		atomic_set(&core_state.available_cores, num_online_cpus());
 	core_state.baseline_cores = atomic_read(&core_state.available_cores);
 
-	core_state.cpu_state = CPU_MASK_NONE;
+	cpumask_clear(&core_state.cpu_state);
 	for_each_possible_cpu(i) {
 		netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
 			       cvm_oct_napi_poll, rx_napi_weight);
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -758,133 +758,4 @@ static inline const struct cpumask *get_
 }
 #endif /* NR_CPUS > BITS_PER_LONG */
 
-/*
- *
- * From here down, all obsolete.  Use cpumask_ variants!
- *
- */
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
-#if NR_CPUS <= BITS_PER_LONG
-
-#define CPU_MASK_ALL							\
-(cpumask_t) { {								\
-	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD			\
-} }
-
-#else
-
-#define CPU_MASK_ALL							\
-(cpumask_t) { {								\
-	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
-	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD			\
-} }
-
-#endif
-
-#define CPU_MASK_NONE							\
-(cpumask_t) { {								\
-	[0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL				\
-} }
-
-#define CPU_MASK_CPU0							\
-(cpumask_t) { {								\
-	[0] =  1UL							\
-} }
-
-#define cpus_addr(src) ((src).bits)
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, int nbits)
-{
-	bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, int nbits)
-{
-	bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
-	return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
-					const cpumask_t *src2p, int nbits)
-{
-	return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
-					const cpumask_t *src2p, int nbits)
-{
-	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
-					const cpumask_t *src2p, int nbits)
-{
-	bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
-				__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
-					const cpumask_t *src2p, int nbits)
-{
-	return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
-					const cpumask_t *src2p, int nbits)
-{
-	return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
-					const cpumask_t *src2p, int nbits)
-{
-	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
-					const cpumask_t *src2p, int nbits)
-{
-	return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
-{
-	return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
-{
-	return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
-			__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
-					const cpumask_t *srcp, int n, int nbits)
-{
-	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
-
 #endif /* __LINUX_CPUMASK_H */
