cpumask: make cpu_coregroup_map() return a const struct cpumask

Instead of returning a cpumask_t by value, return a const struct cpumask
pointer, so callers read the mask in place rather than copying NR_CPUS
bits onto the stack.

For most archs this is trivial; the S/390 version was also used
internally to set up cpu_core_map[] though, so that internal use is
split out into a new static set_cpu_coregroup_map() helper.
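
For example, a caller that used to copy the returned mask onto the
stack can now use the pointer directly (a sketch distilled from the
block/blk.h and kernel/sched.c hunks below, not code from this patch):

	/* before: copies NR_CPUS bits onto the stack */
	cpumask_t mask = cpu_coregroup_map(cpu);
	int first = first_cpu(mask);

	/* after: read through the returned pointer, no copy */
	int first = cpumask_first(cpu_coregroup_map(cpu));

	/* callers that still need a cpumask_t copy dereference it,
	 * as kernel/sched.c does for sd->span:
	 */
	sd->span = *cpu_coregroup_map(i);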

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
---
 arch/s390/include/asm/topology.h     |    2 +-
 arch/s390/kernel/topology.c          |   29 ++++++++++++++++++-----------
 arch/sparc/include/asm/topology_64.h |    2 +-
 arch/x86/include/asm/topology.h      |    2 +-
 arch/x86/kernel/smpboot.c            |    6 +++---
 block/blk.h                          |    6 +++---
 kernel/sched.c                       |    6 +++---
 7 files changed, 30 insertions(+), 23 deletions(-)

diff -r 215c89e1b020 arch/s390/include/asm/topology.h
--- a/arch/s390/include/asm/topology.h	Sat Nov 08 00:03:47 2008 +1100
+++ b/arch/s390/include/asm/topology.h	Sat Nov 08 00:05:23 2008 +1100
@@ -5,7 +5,7 @@
 
 #define mc_capable()	(1)
 
-cpumask_t cpu_coregroup_map(unsigned int cpu);
+const struct cpumask *cpu_coregroup_map(unsigned int cpu);
 
 extern cpumask_t cpu_core_map[NR_CPUS];
 
diff -r 215c89e1b020 arch/s390/kernel/topology.c
--- a/arch/s390/kernel/topology.c	Sat Nov 08 00:03:47 2008 +1100
+++ b/arch/s390/kernel/topology.c	Sat Nov 08 00:05:23 2008 +1100
@@ -68,26 +68,33 @@ static DECLARE_WORK(topology_work, topol
 
 cpumask_t cpu_core_map[NR_CPUS];
 
-cpumask_t cpu_coregroup_map(unsigned int cpu)
+const struct cpumask *cpu_coregroup_map(unsigned int cpu)
+{
+	return &cpu_core_map[cpu];
+}
+
+/* Initialize cpu_core_map[cpu] */
+static void set_cpu_coregroup_map(unsigned int cpu)
 {
 	struct core_info *core = &core_info;
-	cpumask_t mask;
+	struct cpumask *mask = &cpu_core_map[cpu];
 
-	cpus_clear(mask);
-	if (!machine_has_topology)
-		return cpu_present_map;
+	cpumask_clear(mask);
+	if (!machine_has_topology) {
+		cpumask_copy(mask, cpu_present_mask);
+		return;
+	}
 	mutex_lock(&smp_cpu_state_mutex);
 	while (core) {
-		if (cpu_isset(cpu, core->mask)) {
-			mask = core->mask;
+		if (cpumask_test_cpu(cpu, &core->mask)) {
+			cpumask_copy(mask, &core->mask);
 			break;
 		}
 		core = core->next;
 	}
 	mutex_unlock(&smp_cpu_state_mutex);
-	if (cpus_empty(mask))
-		mask = cpumask_of_cpu(cpu);
-	return mask;
+	if (cpumask_empty(mask))
+		cpumask_copy(mask, cpumask_of(cpu));
 }
 
 static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
@@ -206,7 +213,7 @@ static void update_cpu_core_map(void)
 	int cpu;
 
 	for_each_present_cpu(cpu)
-		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+		set_cpu_coregroup_map(cpu);
 }
 
 void arch_update_cpu_topology(void)
diff -r 215c89e1b020 arch/sparc/include/asm/topology_64.h
--- a/arch/sparc/include/asm/topology_64.h	Sat Nov 08 00:03:47 2008 +1100
+++ b/arch/sparc/include/asm/topology_64.h	Sat Nov 08 00:05:23 2008 +1100
@@ -64,6 +64,6 @@ static inline int pcibus_to_node(struct 
 #define smt_capable()				(sparc64_multi_core)
 #endif /* CONFIG_SMP */
 
-#define cpu_coregroup_map(cpu)			(cpu_core_map[cpu])
+#define cpu_coregroup_map(cpu)			(&cpu_core_map[cpu])
 
 #endif /* _ASM_SPARC64_TOPOLOGY_H */
diff -r 215c89e1b020 arch/x86/include/asm/topology.h
--- a/arch/x86/include/asm/topology.h	Sat Nov 08 00:03:47 2008 +1100
+++ b/arch/x86/include/asm/topology.h	Sat Nov 08 00:05:23 2008 +1100
@@ -190,7 +190,7 @@ static inline int node_to_first_cpu(int 
 }
 #endif
 
-extern cpumask_t cpu_coregroup_map(int cpu);
+extern const struct cpumask *cpu_coregroup_map(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
diff -r 215c89e1b020 arch/x86/kernel/smpboot.c
--- a/arch/x86/kernel/smpboot.c	Sat Nov 08 00:03:47 2008 +1100
+++ b/arch/x86/kernel/smpboot.c	Sat Nov 08 00:05:23 2008 +1100
@@ -497,7 +497,7 @@ void __cpuinit set_cpu_sibling_map(int c
 }
 
 /* maps the cpu to the sched domain representing multi-core */
-cpumask_t cpu_coregroup_map(int cpu)
+const struct cpumask *cpu_coregroup_map(int cpu)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	/*
@@ -505,9 +505,9 @@ cpumask_t cpu_coregroup_map(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return per_cpu(cpu_core_map, cpu);
+		return &per_cpu(cpu_core_map, cpu);
 	else
-		return c->llc_shared_map;
+		return &c->llc_shared_map;
 }
 
 static void impress_friends(void)
diff -r 215c89e1b020 block/blk.h
--- a/block/blk.h	Sat Nov 08 00:03:47 2008 +1100
+++ b/block/blk.h	Sat Nov 08 00:05:23 2008 +1100
@@ -99,10 +99,10 @@ static inline int blk_cpu_to_group(int c
 static inline int blk_cpu_to_group(int cpu)
 {
 #ifdef CONFIG_SCHED_MC
-	cpumask_t mask = cpu_coregroup_map(cpu);
-	return first_cpu(mask);
+	const struct cpumask *mask = cpu_coregroup_map(cpu);
+	return cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	return first_cpu(per_cpu(cpu_sibling_map, cpu));
+	return cpumask_first(&per_cpu(cpu_sibling_map, cpu));
 #else
 	return cpu;
 #endif
diff -r 215c89e1b020 kernel/sched.c
--- a/kernel/sched.c	Sat Nov 08 00:03:47 2008 +1100
+++ b/kernel/sched.c	Sat Nov 08 00:05:23 2008 +1100
@@ -7098,7 +7098,7 @@ cpu_to_phys_group(int cpu, const cpumask
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
-	*mask = cpu_coregroup_map(cpu);
+	*mask = *cpu_coregroup_map(cpu);
 	cpus_and(*mask, *mask, *cpu_map);
 	group = first_cpu(*mask);
 #elif defined(CONFIG_SCHED_SMT)
@@ -7431,7 +7431,7 @@ static int __build_sched_domains(const c
 		sd = &per_cpu(core_domains, i);
 		SD_INIT(sd, MC);
 		set_domain_attribute(sd, attr);
-		sd->span = cpu_coregroup_map(i);
+		sd->span = *cpu_coregroup_map(i);
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		p->child = sd;
@@ -7468,7 +7468,7 @@ static int __build_sched_domains(const c
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
 	for_each_cpu_mask_nr(i, *cpu_map) {
-		*this_core_map = cpu_coregroup_map(i);
+		*this_core_map = *cpu_coregroup_map(i);
 		cpus_and(*this_core_map, *this_core_map, *cpu_map);
 		if (i != first_cpu(*this_core_map))
 			continue;
