cpumask: make node_to_cpumask() return a const struct cpumask *

Simple conversion, and note that now we can use the return from
node_to_cpumask() directly, unlike before:

 -	cpumask_t tmp;
 -	tmp = node_to_cpumask(node);
 -	return first_cpu(tmp);
 +	return cpumask_first(node_to_cpumask(node));

Also, the "node_to_cpumask_ptr" macro, which declared a variable, is
completely removed.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/ia64/include/asm/topology.h             |    6 +-
 arch/ia64/kernel/acpi.c                      |    2 
 arch/ia64/kernel/iosapic.c                   |    5 +-
 arch/ia64/sn/kernel/sn2/sn_hwperf.c          |    4 -
 arch/powerpc/include/asm/topology.h          |   10 +---
 arch/powerpc/platforms/cell/spu_priv1_mmio.c |    6 --
 arch/powerpc/platforms/cell/spufs/sched.c    |    5 --
 arch/sh/include/asm/topology.h               |    4 -
 arch/sparc/include/asm/topology_64.h         |   17 +-----
 arch/sparc64/kernel/of_device.c              |    2 
 arch/sparc64/kernel/pci_msi.c                |    2 
 arch/x86/kernel/setup_percpu.c               |   40 ++--------------
 drivers/base/node.c                          |    2 
 drivers/pci/pci-driver.c                     |    7 +-
 include/asm-generic/topology.h               |   15 ------
 include/asm-mips/mach-ip27/topology.h        |    4 -
 include/asm-x86/pci.h                        |    2 
 include/asm-x86/topology.h                   |   66 ++++-----------------------
 include/linux/topology.h                     |    6 --
 kernel/sched.c                               |   31 ++++--------
 mm/page_alloc.c                              |    4 -
 mm/quicklist.c                               |    3 -
 mm/slab.c                                    |    3 -
 mm/vmscan.c                                  |    6 +-
 net/sunrpc/svc.c                             |    3 -
 25 files changed, 69 insertions(+), 186 deletions(-)

diff -r e346c0821821 arch/ia64/include/asm/topology.h
--- a/arch/ia64/include/asm/topology.h	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/ia64/include/asm/topology.h	Sun Oct 05 15:31:46 2008 +1100
@@ -33,7 +33,7 @@
 /*
  * Returns a bitmask of CPUs on Node 'node'.
  */
-#define node_to_cpumask(node) (node_to_cpu_mask[node])
+#define node_to_cpumask(node) (&node_to_cpu_mask[node])
 
 /*
  * Returns the number of the node containing Node 'nid'.
@@ -45,7 +45,7 @@
 /*
  * Returns the number of the first CPU on Node 'node'.
  */
-#define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node)))
+#define node_to_first_cpu(node) (cpumask_first(node_to_cpumask(node)))
 
 /*
  * Determines the node for a given pci bus
@@ -118,7 +118,7 @@ extern void arch_fix_phys_package_id(int
 
 #define pcibus_to_cpumask(bus)	(pcibus_to_node(bus) == -1 ? \
 					CPU_MASK_ALL : \
-					node_to_cpumask(pcibus_to_node(bus)) \
+					*node_to_cpumask(pcibus_to_node(bus)) \
 				)
 
 #include <asm-generic/topology.h>
diff -r e346c0821821 arch/ia64/kernel/acpi.c
--- a/arch/ia64/kernel/acpi.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/ia64/kernel/acpi.c	Sun Oct 05 15:31:46 2008 +1100
@@ -960,7 +960,7 @@ acpi_map_iosapic(acpi_handle handle, u32
 	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
-	    cpus_empty(node_to_cpumask(node)))
+	    cpumask_empty(node_to_cpumask(node)))
 		return AE_OK;
 
 	/* We know a gsi to node mapping! */
diff -r e346c0821821 arch/ia64/kernel/iosapic.c
--- a/arch/ia64/kernel/iosapic.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/ia64/kernel/iosapic.c	Sun Oct 05 15:31:46 2008 +1100
@@ -702,8 +702,9 @@ get_target_cpu (unsigned int gsi, int ir
 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
 			goto skip_numa_setup;
 
-		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-		cpus_and(cpu_mask, cpu_mask, domain);
+		cpumask_and(&cpu_mask,
+			    node_to_cpumask(iosapic_lists[iosapic_index].node),
+			    &domain);
 		for_each_cpu_mask(numa_cpu, cpu_mask) {
 			if (!cpu_online(numa_cpu))
 				cpu_clear(numa_cpu, cpu_mask);
diff -r e346c0821821 arch/ia64/sn/kernel/sn2/sn_hwperf.c
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c	Sun Oct 05 15:31:46 2008 +1100
@@ -385,7 +385,7 @@ static int sn_topology_show(struct seq_f
 	int j;
 	const char *slabname;
 	int ordinal;
-	cpumask_t cpumask;
+	const struct cpumask *cpumask;
 	char slice;
 	struct cpuinfo_ia64 *c;
 	struct sn_hwperf_port_info *ptdata;
@@ -475,7 +475,7 @@ static int sn_topology_show(struct seq_f
 		if (!SN_HWPERF_IS_IONODE(obj)) {
 			cpumask = node_to_cpumask(ordinal);
 			for_each_online_cpu(i) {
-				if (cpu_isset(i, cpumask)) {
+				if (cpumask_test_cpu(i, cpumask)) {
 					slice = 'a' + cpuid_to_slice(i);
 					c = cpu_data(i);
 					seq_printf(s, "cpu %d %s%c local"
diff -r e346c0821821 arch/powerpc/include/asm/topology.h
--- a/arch/powerpc/include/asm/topology.h	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/powerpc/include/asm/topology.h	Sun Oct 05 15:31:46 2008 +1100
@@ -17,16 +17,14 @@ static inline int cpu_to_node(int cpu)
 
 #define parent_node(node)	(node)
 
-static inline cpumask_t node_to_cpumask(int node)
+static inline const struct cpumask *node_to_cpumask(int node)
 {
-	return numa_cpumask_lookup_table[node];
+	return &numa_cpumask_lookup_table[node];
 }
 
 static inline int node_to_first_cpu(int node)
 {
-	cpumask_t tmp;
-	tmp = node_to_cpumask(node);
-	return first_cpu(tmp);
+	return cpumask_first(node_to_cpumask(node));
 }
 
 int of_node_to_nid(struct device_node *device);
@@ -43,7 +41,7 @@ static inline int pcibus_to_node(struct 
 
 #define pcibus_to_cpumask(bus)	(pcibus_to_node(bus) == -1 ? \
 					CPU_MASK_ALL : \
-					node_to_cpumask(pcibus_to_node(bus)) \
+					*node_to_cpumask(pcibus_to_node(bus)) \
 				)
 
 /* sched_domains SD_NODE_INIT for PPC64 machines */
diff -r e346c0821821 arch/powerpc/platforms/cell/spu_priv1_mmio.c
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c	Sun Oct 05 15:31:46 2008 +1100
@@ -80,10 +80,8 @@ static void cpu_affinity_set(struct spu 
 	u64 route;
 
 	if (nr_cpus_node(spu->node)) {
-		cpumask_t spumask = node_to_cpumask(spu->node);
-		cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu));
-
-		if (!cpus_intersects(spumask, cpumask))
+		if (!cpumask_intersects(node_to_cpumask(spu->node),
+					node_to_cpumask(cpu_to_node(cpu))))
 			return;
 	}
 
diff -r e346c0821821 arch/powerpc/platforms/cell/spufs/sched.c
--- a/arch/powerpc/platforms/cell/spufs/sched.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/powerpc/platforms/cell/spufs/sched.c	Sun Oct 05 15:31:46 2008 +1100
@@ -166,9 +166,8 @@ static int __node_allowed(struct spu_con
 static int __node_allowed(struct spu_context *ctx, int node)
 {
 	if (nr_cpus_node(node)) {
-		cpumask_t mask = node_to_cpumask(node);
-
-		if (cpus_intersects(mask, ctx->cpus_allowed))
+		if (cpumask_intersects(node_to_cpumask(node),
+				       ctx->cpus_allowed))
 			return 1;
 	}
 
diff -r e346c0821821 arch/sh/include/asm/topology.h
--- a/arch/sh/include/asm/topology.h	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/sh/include/asm/topology.h	Sun Oct 05 15:31:46 2008 +1100
@@ -32,13 +32,13 @@
 #define cpu_to_node(cpu)	((void)(cpu),0)
 #define parent_node(node)	((void)(node),0)
 
-#define node_to_cpumask(node)	((void)node, cpu_online_map)
+#define node_to_cpumask(node)	((void)node, cpu_online_mask)
 #define node_to_first_cpu(node)	((void)(node),0)
 
 #define pcibus_to_node(bus)	((void)(bus), -1)
 #define pcibus_to_cpumask(bus)	(pcibus_to_node(bus) == -1 ? \
 					CPU_MASK_ALL : \
-					node_to_cpumask(pcibus_to_node(bus)) \
+					*node_to_cpumask(pcibus_to_node(bus)) \
 				)
 #endif
 
diff -r e346c0821821 arch/sparc/include/asm/topology_64.h
--- a/arch/sparc/include/asm/topology_64.h	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/sparc/include/asm/topology_64.h	Sun Oct 05 15:31:46 2008 +1100
@@ -12,23 +12,14 @@ static inline int cpu_to_node(int cpu)
 
 #define parent_node(node)	(node)
 
-static inline cpumask_t node_to_cpumask(int node)
+static inline struct cpumask *node_to_cpumask(int node)
 {
-	return numa_cpumask_lookup_table[node];
+	return &numa_cpumask_lookup_table[node];
 }
-
-/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-#define node_to_cpumask_ptr(v, node)		\
-		cpumask_t *v = &(numa_cpumask_lookup_table[node])
-
-#define node_to_cpumask_ptr_next(v, node)	\
-			   v = &(numa_cpumask_lookup_table[node])
 
 static inline int node_to_first_cpu(int node)
 {
-	cpumask_t tmp;
-	tmp = node_to_cpumask(node);
-	return first_cpu(tmp);
+	return cpumask_first(node_to_cpumask(node));
 }
 
 struct pci_bus;
@@ -44,7 +35,7 @@ static inline int pcibus_to_node(struct 
 #define pcibus_to_cpumask(bus)	\
 	(pcibus_to_node(bus) == -1 ? \
 	 CPU_MASK_ALL : \
-	 node_to_cpumask(pcibus_to_node(bus)))
+	 *node_to_cpumask(pcibus_to_node(bus)))
 
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.min_interval		= 8,			\
diff -r e346c0821821 arch/sparc64/kernel/of_device.c
--- a/arch/sparc64/kernel/of_device.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/sparc64/kernel/of_device.c	Sun Oct 05 15:31:46 2008 +1100
@@ -747,7 +747,7 @@ out:
 out:
 	nid = of_node_to_nid(dp);
 	if (nid != -1) {
-		irq_set_affinity(irq, node_to_cpumask_ptr(nid));
+		irq_set_affinity(irq, node_to_cpumask(nid));
 	}
 
 	return irq;
diff -r e346c0821821 arch/sparc64/kernel/pci_msi.c
--- a/arch/sparc64/kernel/pci_msi.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/sparc64/kernel/pci_msi.c	Sun Oct 05 15:31:46 2008 +1100
@@ -286,7 +286,7 @@ static int bringup_one_msi_queue(struct 
 
 	nid = pbm->numa_node;
 	if (nid != -1) {
-		irq_set_affinity(irq, node_to_cpumask_ptr(nid));
+		irq_set_affinity(irq, node_to_cpumask(nid));
 	}
 	err = request_irq(irq, sparc64_msiq_interrupt, 0,
 			  "MSIQ",
diff -r e346c0821821 arch/x86/kernel/setup_percpu.c
--- a/arch/x86/kernel/setup_percpu.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/arch/x86/kernel/setup_percpu.c	Sun Oct 05 15:31:46 2008 +1100
@@ -315,56 +315,26 @@ int early_cpu_to_node(int cpu)
 	return per_cpu(x86_cpu_to_node_map, cpu);
 }
 
-
-/* empty cpumask */
-static const cpumask_t cpu_mask_none;
-
 /*
  * Returns a pointer to the bitmask of CPUs on Node 'node'.
  */
-const cpumask_t *_node_to_cpumask_ptr(int node)
+const struct cpumask *node_to_cpumask(int node)
 {
 	if (node_to_cpumask_map == NULL) {
 		printk(KERN_WARNING
-			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
+			"node_to_cpumask(%d): no node_to_cpumask_map!\n",
 			node);
 		dump_stack();
-		return (const cpumask_t *)&cpu_online_map;
-	}
-	if (node >= nr_node_ids) {
-		printk(KERN_WARNING
-			"_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n",
-			node, nr_node_ids);
-		dump_stack();
-		return &cpu_mask_none;
-	}
-	return &node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(_node_to_cpumask_ptr);
-
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-cpumask_t node_to_cpumask(int node)
-{
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_WARNING
-			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
-		dump_stack();
-		return cpu_online_map;
+		return cpu_online_mask;
 	}
 	if (node >= nr_node_ids) {
 		printk(KERN_WARNING
 			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
 			node, nr_node_ids);
 		dump_stack();
-		return cpu_mask_none;
+		return cpu_none_mask;
 	}
-	return node_to_cpumask_map[node];
+	return &node_to_cpumask_map[node];
 }
 EXPORT_SYMBOL(node_to_cpumask);
 
diff -r e346c0821821 drivers/base/node.c
--- a/drivers/base/node.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/drivers/base/node.c	Sun Oct 05 15:31:46 2008 +1100
@@ -22,7 +22,7 @@ static ssize_t node_read_cpumap(struct s
 static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
 {
 	struct node *node_dev = to_node(dev);
-	node_to_cpumask_ptr(mask, node_dev->sysdev.id);
+	const struct cpumask *mask = node_to_cpumask(node_dev->sysdev.id);
 	int len;
 
 	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
diff -r e346c0821821 drivers/pci/pci-driver.c
--- a/drivers/pci/pci-driver.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/drivers/pci/pci-driver.c	Sun Oct 05 15:31:46 2008 +1100
@@ -183,10 +183,9 @@ static int pci_call_probe(struct pci_dri
 	cpumask_t oldmask = current->cpus_allowed;
 	int node = dev_to_node(&dev->dev);
 
-	if (node >= 0) {
-		node_to_cpumask_ptr(nodecpumask, node);
-		set_cpus_allowed_ptr(current, nodecpumask);
-	}
+	if (node >= 0)
+		set_cpus_allowed_ptr(current, node_to_cpumask(node));
+
 	/* And set default memory allocation policy */
 	oldpol = current->mempolicy;
 	current->mempolicy = NULL;	/* fall back to system default policy */
diff -r e346c0821821 include/asm-generic/topology.h
--- a/include/asm-generic/topology.h	Sun Oct 05 15:19:42 2008 +1100
+++ b/include/asm-generic/topology.h	Sun Oct 05 15:31:46 2008 +1100
@@ -38,7 +38,7 @@
 #define parent_node(node)	((void)(node),0)
 #endif
 #ifndef node_to_cpumask
-#define node_to_cpumask(node)	((void)node, cpu_online_map)
+#define node_to_cpumask(node)	((void)node, cpu_online_mask)
 #endif
 #ifndef node_to_first_cpu
 #define node_to_first_cpu(node)	((void)(node),0)
@@ -50,21 +50,10 @@
 #ifndef pcibus_to_cpumask
 #define pcibus_to_cpumask(bus)	(pcibus_to_node(bus) == -1 ? \
 					CPU_MASK_ALL : \
-					node_to_cpumask(pcibus_to_node(bus)) \
+					*node_to_cpumask(pcibus_to_node(bus)) \
 				)
 #endif
 
 #endif	/* CONFIG_NUMA */
 
-/* returns pointer to cpumask for specified node */
-#ifndef node_to_cpumask_ptr
-
-#define	node_to_cpumask_ptr(v, node) 					\
-		cpumask_t _##v = node_to_cpumask(node);			\
-		const cpumask_t *v = &_##v
-
-#define node_to_cpumask_ptr_next(v, node)				\
-			  _##v = node_to_cpumask(node)
-#endif
-
 #endif /* _ASM_GENERIC_TOPOLOGY_H */
diff -r e346c0821821 include/asm-mips/mach-ip27/topology.h
--- a/include/asm-mips/mach-ip27/topology.h	Sun Oct 05 15:19:42 2008 +1100
+++ b/include/asm-mips/mach-ip27/topology.h	Sun Oct 05 15:31:46 2008 +1100
@@ -24,8 +24,8 @@ extern struct cpuinfo_ip27 sn_cpu_info[N
 
 #define cpu_to_node(cpu)	(sn_cpu_info[(cpu)].p_nodeid)
 #define parent_node(node)	(node)
-#define node_to_cpumask(node)	(hub_data(node)->h_cpus)
-#define node_to_first_cpu(node)	(first_cpu(node_to_cpumask(node)))
+#define node_to_cpumask(node)	(&hub_data(node)->h_cpus)
+#define node_to_first_cpu(node)	(cpumask_first(node_to_cpumask(node)))
 struct pci_bus;
 extern int pcibus_to_node(struct pci_bus *);
 
diff -r e346c0821821 include/asm-x86/pci.h
--- a/include/asm-x86/pci.h	Sun Oct 05 15:19:42 2008 +1100
+++ b/include/asm-x86/pci.h	Sun Oct 05 15:31:46 2008 +1100
@@ -107,7 +107,7 @@ static inline int __pcibus_to_node(struc
 
 static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
 {
-	return node_to_cpumask(__pcibus_to_node(bus));
+	return *node_to_cpumask(__pcibus_to_node(bus));
 }
 #endif
 
diff -r e346c0821821 include/asm-x86/topology.h
--- a/include/asm-x86/topology.h	Sun Oct 05 15:19:42 2008 +1100
+++ b/include/asm-x86/topology.h	Sun Oct 05 15:31:46 2008 +1100
@@ -57,17 +57,6 @@ static inline int cpu_to_node(int cpu)
 }
 #define early_cpu_to_node(cpu)	cpu_to_node(cpu)
 
-/* Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return node_to_cpumask_map[node];
-}
-
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */
@@ -82,8 +71,6 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_no
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
-extern const cpumask_t *_node_to_cpumask_ptr(int node);
-extern cpumask_t node_to_cpumask(int node);
 
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
 
@@ -102,28 +89,14 @@ static inline int early_cpu_to_node(int 
 	return per_cpu(x86_cpu_to_node_map, cpu);
 }
 
-/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+#endif /* CONFIG_X86_64 */
+
+/* Returns a bitmask of CPUs on Node 'node'. */
+static inline const struct cpumask *node_to_cpumask(int node)
 {
 	return &node_to_cpumask_map[node];
 }
-
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return node_to_cpumask_map[node];
-}
-
-#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
-
-/* Replace default node_to_cpumask_ptr with optimized version */
-#define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = _node_to_cpumask_ptr(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-			   v = _node_to_cpumask_ptr(node)
-
-#endif /* CONFIG_X86_64 */
 
 /*
  * Returns the number of the node containing Node 'node'. This
@@ -186,37 +159,18 @@ extern int __node_distance(int, int);
 #define	cpu_to_node(cpu)	0
 #define	early_cpu_to_node(cpu)	0
 
-static inline const cpumask_t *_node_to_cpumask_ptr(int node)
+static inline const struct cpumask *node_to_cpumask(int node)
 {
-	return &cpu_online_map;
+	return cpu_online_mask;
 }
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return cpu_online_map;
-}
+#endif /* !CONFIG_NUMA */
+
 static inline int node_to_first_cpu(int node)
 {
-	return first_cpu(cpu_online_map);
+	return cpumask_first(node_to_cpumask(node));
 }
 
-/* Replace default node_to_cpumask_ptr with optimized version */
-#define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = _node_to_cpumask_ptr(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-			   v = _node_to_cpumask_ptr(node)
-#endif
-
 #include <asm-generic/topology.h>
-
-#ifdef CONFIG_NUMA
-/* Returns the number of the first CPU on Node 'node'. */
-static inline int node_to_first_cpu(int node)
-{
-	node_to_cpumask_ptr(mask, node);
-	return first_cpu(*mask);
-}
-#endif
 
 extern const struct cpumask *cpu_coregroup_map(int cpu);
 
diff -r e346c0821821 include/linux/topology.h
--- a/include/linux/topology.h	Sun Oct 05 15:19:42 2008 +1100
+++ b/include/linux/topology.h	Sun Oct 05 15:31:46 2008 +1100
@@ -38,11 +38,7 @@
 #endif
 
 #ifndef nr_cpus_node
-#define nr_cpus_node(node)				\
-	({						\
-		node_to_cpumask_ptr(__tmp__, node);	\
-		cpus_weight(*__tmp__);			\
-	})
+#define nr_cpus_node(node) cpumask_weight(node_to_cpumask(node))
 #endif
 
 #define for_each_node_with_cpus(node)			\
diff -r e346c0821821 kernel/sched.c
--- a/kernel/sched.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/kernel/sched.c	Sun Oct 05 15:31:46 2008 +1100
@@ -6054,7 +6054,7 @@ static void move_task_off_dead_cpu(int d
 
 	do {
 		/* On same node? */
-		mask = node_to_cpumask(cpu_to_node(dead_cpu));
+		mask = *node_to_cpumask(cpu_to_node(dead_cpu));
 		cpus_and(mask, mask, p->cpus_allowed);
 		dest_cpu = cpumask_any_both(cpu_online_mask, &mask);
 
@@ -6954,20 +6954,17 @@ static void sched_domain_node_span(int n
 static void sched_domain_node_span(int node, cpumask_t *span)
 {
 	nodemask_t used_nodes;
-	node_to_cpumask_ptr(nodemask, node);
-	int i;
-
-	cpus_clear(*span);
+	int i;
+
 	nodes_clear(used_nodes);
 
-	cpus_or(*span, *span, *nodemask);
+	cpumask_copy(span, node_to_cpumask(node));
 	node_set(node, used_nodes);
 
 	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
 		int next_node = find_next_best_node(node, &used_nodes);
 
-		node_to_cpumask_ptr_next(nodemask, next_node);
-		cpus_or(*span, *span, *nodemask);
+		cpumask_or(span, span, node_to_cpumask(next_node));
 	}
 }
 #endif /* CONFIG_NUMA */
@@ -7065,8 +7062,7 @@ static int cpu_to_allnodes_group(int cpu
 {
 	int group;
 
-	*nodemask = node_to_cpumask(cpu_to_node(cpu));
-	cpus_and(*nodemask, *nodemask, *cpu_map);
+	cpumask_and(nodemask, node_to_cpumask(cpu_to_node(cpu)), cpu_map);
 	group = first_cpu(*nodemask);
 
 	if (sg)
@@ -7117,8 +7113,7 @@ static void free_sched_groups(const cpum
 		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
-			*nodemask = node_to_cpumask(i);
-			cpus_and(*nodemask, *nodemask, *cpu_map);
+			cpumask_and(nodemask, node_to_cpumask(i), cpu_map);
 			if (cpus_empty(*nodemask))
 				continue;
 
@@ -7318,8 +7313,7 @@ static int __build_sched_domains(const c
 	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 
-		*nodemask = node_to_cpumask(cpu_to_node(i));
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, node_to_cpumask(cpu_to_node(i)), cpu_map);
 
 #ifdef CONFIG_NUMA
 		if (cpus_weight(*cpu_map) >
@@ -7410,8 +7404,7 @@ static int __build_sched_domains(const c
 
 	/* Set up physical groups */
 	for (i = 0; i < nr_node_ids; i++) {
-		*nodemask = node_to_cpumask(i);
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, node_to_cpumask(i), cpu_map);
 		if (cpus_empty(*nodemask))
 			continue;
 
@@ -7433,10 +7426,9 @@ static int __build_sched_domains(const c
 		struct sched_group *sg, *prev;
 		int j;
 
-		*nodemask = node_to_cpumask(i);
 		cpus_clear(*covered);
 
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, node_to_cpumask(i), cpu_map);
 		if (cpus_empty(*nodemask)) {
 			sched_group_nodes[i] = NULL;
 			continue;
@@ -7466,7 +7458,6 @@ static int __build_sched_domains(const c
 
 		for (j = 0; j < nr_node_ids; j++) {
 			int n = (i + j) % nr_node_ids;
-			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
 			cpus_and(*tmpmask, *notcovered, *cpu_map);
@@ -7474,7 +7465,7 @@ static int __build_sched_domains(const c
 			if (cpus_empty(*tmpmask))
 				break;
 
-			cpus_and(*tmpmask, *tmpmask, *pnodemask);
+			cpumask_and(tmpmask, tmpmask, node_to_cpumask(n));
 			if (cpus_empty(*tmpmask))
 				continue;
 
diff -r e346c0821821 mm/page_alloc.c
--- a/mm/page_alloc.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/mm/page_alloc.c	Sun Oct 05 15:31:46 2008 +1100
@@ -2080,7 +2080,6 @@ static int find_next_best_node(int node,
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
-	node_to_cpumask_ptr(tmp, 0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2101,8 +2100,7 @@ static int find_next_best_node(int node,
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		node_to_cpumask_ptr_next(tmp, n);
-		if (!cpus_empty(*tmp))
+		if (!cpumask_empty(node_to_cpumask(n)))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
diff -r e346c0821821 mm/quicklist.c
--- a/mm/quicklist.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/mm/quicklist.c	Sun Oct 05 15:31:46 2008 +1100
@@ -29,7 +29,6 @@ static unsigned long max_pages(unsigned 
 	int node = numa_node_id();
 	struct zone *zones = NODE_DATA(node)->node_zones;
 	int num_cpus_on_node;
-	node_to_cpumask_ptr(cpumask_on_node, node);
 
 	node_free_pages =
 #ifdef CONFIG_ZONE_DMA
@@ -42,7 +41,7 @@ static unsigned long max_pages(unsigned 
 
 	max = node_free_pages / FRACTION_OF_NODE_MEM;
 
-	num_cpus_on_node = cpus_weight(*cpumask_on_node);
+	num_cpus_on_node = cpumask_weight(node_to_cpumask(node));
 	max /= num_cpus_on_node;
 
 	return max(max, min_pages);
diff -r e346c0821821 mm/slab.c
--- a/mm/slab.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/mm/slab.c	Sun Oct 05 15:31:46 2008 +1100
@@ -1159,7 +1159,6 @@ static void __cpuinit cpuup_canceled(lon
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
-	node_to_cpumask_ptr(mask, node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
 		struct array_cache *nc;
@@ -1181,7 +1180,7 @@ static void __cpuinit cpuup_canceled(lon
 		if (nc)
 			free_block(cachep, nc->entry, nc->avail, node);
 
-		if (!cpus_empty(*mask)) {
+		if (!cpumask_empty(node_to_cpumask(node))) {
 			spin_unlock_irq(&l3->list_lock);
 			goto free_array_cache;
 		}
diff -r e346c0821821 mm/vmscan.c
--- a/mm/vmscan.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/mm/vmscan.c	Sun Oct 05 15:31:46 2008 +1100
@@ -1687,7 +1687,7 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	node_to_cpumask_ptr(cpumask, pgdat->node_id);
+	const struct cpumask *cpumask = node_to_cpumask(pgdat->node_id);
 
 	if (!cpus_empty(*cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
@@ -1924,7 +1924,9 @@ static int __devinit cpu_callback(struct
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
 			pg_data_t *pgdat = NODE_DATA(nid);
-			node_to_cpumask_ptr(mask, pgdat->node_id);
+			const struct cpumask *mask;
+
+			mask = node_to_cpumask(pgdat->node_id);
 
 			if (cpumask_any_both(cpu_online_mask, mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
diff -r e346c0821821 net/sunrpc/svc.c
--- a/net/sunrpc/svc.c	Sun Oct 05 15:19:42 2008 +1100
+++ b/net/sunrpc/svc.c	Sun Oct 05 15:31:46 2008 +1100
@@ -315,8 +315,7 @@ svc_pool_map_set_cpumask(struct task_str
 	}
 	case SVC_POOL_PERNODE:
 	{
-		node_to_cpumask_ptr(nodecpumask, node);
-		set_cpus_allowed_ptr(task, nodecpumask);
+		set_cpus_allowed_ptr(task, node_to_cpumask(node));
 		break;
 	}
 	}
