---
 kernel/sched.c |   28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)

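Note for reviewers (below the fold, not for the changelog): the conversion
applied throughout is to stop materialising a node's cpumask as a by-value
temporary via node_to_cpumask() and instead go through a pointer.  A rough
sketch of the pattern, with the caveat that the return type of
cpumask_for_node() is my assumption (the call sites read as if it returns a
const cpumask_t * for the node; I have not quoted the header here):

	/* old: node_to_cpumask() hands back a whole cpumask_t by value,
	 * which is then masked in a second step */
	*nodemask = node_to_cpumask(cpu_to_node(i));
	cpus_and(*nodemask, *nodemask, *cpu_map);

	/* new: one pointer-based op, no full-width temporary */
	cpumask_and(nodemask, cpumask_for_node(cpu_to_node(i)), cpu_map);

The remaining `*x = *cpumask_for_node(...)` sites still copy, but into
storage the caller already owns; the sched_domain_node_span() hunk goes one
step further and uses cpumask_copy()/cpumask_or() directly on the span.
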
diff -r a69d98fd3c95 kernel/sched.c
--- a/kernel/sched.c	Thu Nov 06 23:41:15 2008 +1100
+++ b/kernel/sched.c	Fri Nov 07 00:01:46 2008 +1100
@@ -6123,7 +6123,7 @@ static void move_task_off_dead_cpu(int d
 
 	do {
 		/* On same node? */
-		mask = node_to_cpumask(cpu_to_node(dead_cpu));
+		mask = *cpumask_for_node(cpu_to_node(dead_cpu));
 		cpus_and(mask, mask, p->cpus_allowed);
 		dest_cpu = any_online_cpu(mask);
 
@@ -7022,20 +7022,17 @@ static void sched_domain_node_span(int n
 static void sched_domain_node_span(int node, cpumask_t *span)
 {
 	nodemask_t used_nodes;
-	node_to_cpumask_ptr(nodemask, node);
-	int i;
-
-	cpus_clear(*span);
+	int i;
+
 	nodes_clear(used_nodes);
 
-	cpus_or(*span, *span, *nodemask);
+	cpumask_copy(span, cpumask_for_node(node));
 	node_set(node, used_nodes);
 
 	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
 		int next_node = find_next_best_node(node, &used_nodes);
 
-		node_to_cpumask_ptr_next(nodemask, next_node);
-		cpus_or(*span, *span, *nodemask);
+		cpumask_or(span, span, cpumask_for_node(next_node));
 	}
 }
 #endif /* CONFIG_NUMA */
@@ -7133,7 +7130,7 @@ static int cpu_to_allnodes_group(int cpu
 {
 	int group;
 
-	*nodemask = node_to_cpumask(cpu_to_node(cpu));
+	*nodemask = *cpumask_for_node(cpu_to_node(cpu));
 	cpus_and(*nodemask, *nodemask, *cpu_map);
 	group = first_cpu(*nodemask);
 
@@ -7185,7 +7182,7 @@ static void free_sched_groups(const cpum
 		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
-			*nodemask = node_to_cpumask(i);
+			*nodemask = *cpumask_for_node(i);
 			cpus_and(*nodemask, *nodemask, *cpu_map);
 			if (cpus_empty(*nodemask))
 				continue;
@@ -7426,8 +7423,7 @@ static int __build_sched_domains(const c
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
-		*nodemask = node_to_cpumask(cpu_to_node(i));
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_for_node(cpu_to_node(i)), cpu_map);
 
 #ifdef CONFIG_NUMA
 		if (cpus_weight(*cpu_map) >
@@ -7526,7 +7522,7 @@ static int __build_sched_domains(const c
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
-		*nodemask = node_to_cpumask(i);
+		*nodemask = *cpumask_for_node(i);
 		cpus_and(*nodemask, *nodemask, *cpu_map);
 		if (cpus_empty(*nodemask))
 			continue;
@@ -7554,10 +7550,9 @@ static int __build_sched_domains(const c
 		SCHED_CPUMASK_VAR(covered, allmasks);
 		int j;
 
-		*nodemask = node_to_cpumask(i);
 		cpus_clear(*covered);
 
-		cpus_and(*nodemask, *nodemask, *cpu_map);
+		cpumask_and(nodemask, cpumask_for_node(i), cpu_map);
 		if (cpus_empty(*nodemask)) {
 			sched_group_nodes[i] = NULL;
 			continue;
@@ -7588,7 +7583,6 @@ static int __build_sched_domains(const c
 		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
 			int n = (i + j) % nr_node_ids;
-			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
 			cpus_and(*tmpmask, *notcovered, *cpu_map);
@@ -7596,7 +7590,7 @@ static int __build_sched_domains(const c
 			if (cpus_empty(*tmpmask))
 				break;
 
-			cpus_and(*tmpmask, *tmpmask, *pnodemask);
+			cpumask_and(tmpmask, tmpmask, cpumask_for_node(n));
 			if (cpus_empty(*tmpmask))
 				continue;
 
