cpumask: replace node_to_cpumask with cpumask_of_node in sched.c

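node_to_cpumask() hands back a full cpumask_t by value (and
node_to_cpumask_ptr() may hide an on-stack copy on some configurations),
whereas cpumask_of_node() returns a const struct cpumask * that the
cpumask_* operators can use directly.

A sketch of the conversion pattern applied throughout the file (the
variable names below are illustrative, not taken verbatim from the diff):

	/* before: copy the node's mask, then AND it in place */
	cpumask_t mask = node_to_cpumask(node);
	cpus_and(mask, mask, *cpu_map);

	/* after: AND directly against the node's const mask, no copy */
	cpumask_and(dstp, cpumask_of_node(node), cpu_map);
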
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 kernel/sched.c |   38 ++++++++++++++------------------------
 1 file changed, 14 insertions(+), 24 deletions(-)

diff -r 7b55315f38b7 kernel/sched.c
--- a/kernel/sched.c	Tue Nov 18 21:15:30 2008 +1030
+++ b/kernel/sched.c	Tue Nov 18 21:22:42 2008 +1030
@@ -6136,9 +6136,8 @@
 
 	do {
 		/* On same node? */
-		node_to_cpumask_ptr(pnodemask, cpu_to_node(dead_cpu));
-
-		cpus_and(mask, *pnodemask, p->cpus_allowed);
+		cpumask_and(&mask, cpumask_of_node(cpu_to_node(dead_cpu)),
+			    &p->cpus_allowed);
 		dest_cpu = any_online_cpu(mask);
 
 		/* On any allowed CPU? */
@@ -7038,20 +7037,17 @@
 static void sched_domain_node_span(int node, cpumask_t *span)
 {
 	nodemask_t used_nodes;
-	node_to_cpumask_ptr(nodemask, node);
-	int i;
-
-	cpus_clear(*span);
+	int i;
+
 	nodes_clear(used_nodes);
 
-	cpus_or(*span, *span, *nodemask);
+	cpumask_copy(span, cpumask_of_node(node));
 	node_set(node, used_nodes);
 
 	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
 		int next_node = find_next_best_node(node, &used_nodes);
 
-		node_to_cpumask_ptr_next(nodemask, next_node);
-		cpus_or(*span, *span, *nodemask);
+		cpumask_or(span, span, cpumask_of_node(next_node));
 	}
 }
 #endif /* CONFIG_NUMA */
@@ -7148,9 +7144,8 @@
 				 struct sched_group **sg, cpumask_t *nodemask)
 {
 	int group;
-	node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu));
-
-	cpus_and(*nodemask, *pnodemask, *cpu_map);
+
+	cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
 	group = first_cpu(*nodemask);
 
 	if (sg)
@@ -7200,9 +7195,8 @@
 
 		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
-			node_to_cpumask_ptr(pnodemask, i);
-
-			cpus_and(*nodemask, *pnodemask, *cpu_map);
+
+			cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
 			if (cpus_empty(*nodemask))
 				continue;
 
@@ -7410,8 +7404,7 @@
 	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 
-		*nodemask = node_to_cpumask(cpu_to_node(i));
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
 
 #ifdef CONFIG_NUMA
 		if (cpus_weight(*cpu_map) >
@@ -7501,8 +7494,7 @@
 
 	/* Set up physical groups */
 	for (i = 0; i < nr_node_ids; i++) {
-		*nodemask = node_to_cpumask(i);
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
 		if (cpus_empty(*nodemask))
 			continue;
 
@@ -7524,10 +7516,9 @@
 		struct sched_group *sg, *prev;
 		int j;
 
-		*nodemask = node_to_cpumask(i);
 		cpus_clear(*covered);
 
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
 		if (cpus_empty(*nodemask)) {
 			sched_group_nodes[i] = NULL;
 			continue;
@@ -7557,7 +7548,6 @@
 
 		for (j = 0; j < nr_node_ids; j++) {
 			int n = (i + j) % nr_node_ids;
-			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
 			cpus_and(*tmpmask, *notcovered, *cpu_map);
@@ -7565,7 +7555,7 @@
 			if (cpus_empty(*tmpmask))
 				break;
 
-			cpus_and(*tmpmask, *tmpmask, *pnodemask);
+			cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
 			if (cpus_empty(*tmpmask))
 				continue;
 
