cpumask: make cpu_coregroup_map() return a const struct cpumask *

Instead of returning a cpumask_t by value, make it return a const struct cpumask pointer.

For most archs this is trivial; the S/390 version was also used
internally to set up cpu_core_map[] though, so I split that part out
into a new set_cpu_coregroup_map() helper and made cpu_coregroup_map()
simply return &cpu_core_map[cpu].
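
For callers the change looks roughly like this (an illustrative sketch
only, not code taken from any file touched here):

	/* before: the whole cpumask_t is copied onto the caller's stack */
	cpumask_t mask = cpu_coregroup_map(cpu);

	/* after: the caller gets a read-only pointer, no copy */
	const struct cpumask *mask = cpu_coregroup_map(cpu);

The sched.c users still dereference the result for now, since the
surrounding code there still works on cpumask_t values.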

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/s390/include/asm/topology.h     |    2 +-
 arch/s390/kernel/topology.c          |   29 ++++++++++++++++++-----------
 arch/sparc/include/asm/topology_64.h |    2 +-
 arch/x86/kernel/smpboot.c            |    6 +++---
 include/asm-x86/topology.h           |    2 +-
 kernel/sched.c                       |    6 +++---
 6 files changed, 27 insertions(+), 20 deletions(-)

diff -r 8615e958e1f7 arch/s390/include/asm/topology.h
--- a/arch/s390/include/asm/topology.h	Sat Oct 04 23:52:09 2008 +1000
+++ b/arch/s390/include/asm/topology.h	Sun Oct 05 00:44:05 2008 +1000
@@ -5,7 +5,7 @@
 
 #define mc_capable()	(1)
 
-cpumask_t cpu_coregroup_map(unsigned int cpu);
+const struct cpumask *cpu_coregroup_map(unsigned int cpu);
 
 extern cpumask_t cpu_core_map[NR_CPUS];
 
diff -r 8615e958e1f7 arch/s390/kernel/topology.c
--- a/arch/s390/kernel/topology.c	Sat Oct 04 23:52:09 2008 +1000
+++ b/arch/s390/kernel/topology.c	Sun Oct 05 00:44:05 2008 +1000
@@ -68,26 +68,33 @@ static DECLARE_WORK(topology_work, topol
 
 cpumask_t cpu_core_map[NR_CPUS];
 
-cpumask_t cpu_coregroup_map(unsigned int cpu)
+const struct cpumask *cpu_coregroup_map(unsigned int cpu)
+{
+	return &cpu_core_map[cpu];
+}
+
+/* Initialize cpu_core_map[cpu] */
+static void set_cpu_coregroup_map(unsigned int cpu)
 {
 	struct core_info *core = &core_info;
-	cpumask_t mask;
+	struct cpumask *mask = &cpu_core_map[cpu];
 
-	cpus_clear(mask);
-	if (!machine_has_topology)
-		return cpu_present_map;
+	cpumask_clear(mask);
+	if (!machine_has_topology) {
+		cpumask_copy(mask, cpu_present_mask);
+		return;
+	}
 	mutex_lock(&smp_cpu_state_mutex);
 	while (core) {
-		if (cpu_isset(cpu, core->mask)) {
-			mask = core->mask;
+		if (cpumask_test_cpu(cpu, &core->mask)) {
+			cpumask_copy(mask, &core->mask);
 			break;
 		}
 		core = core->next;
 	}
 	mutex_unlock(&smp_cpu_state_mutex);
-	if (cpus_empty(mask))
-		mask = cpumask_of_cpu(cpu);
-	return mask;
+	if (cpumask_empty(mask))
+		cpumask_copy(mask, cpumask_of(cpu));
 }
 
 static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
@@ -206,7 +213,7 @@ static void update_cpu_core_map(void)
 	int cpu;
 
 	for_each_present_cpu(cpu)
-		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+		set_cpu_coregroup_map(cpu);
 }
 
 void arch_update_cpu_topology(void)
diff -r 8615e958e1f7 arch/sparc/include/asm/topology_64.h
--- a/arch/sparc/include/asm/topology_64.h	Sat Oct 04 23:52:09 2008 +1000
+++ b/arch/sparc/include/asm/topology_64.h	Sun Oct 05 00:44:05 2008 +1000
@@ -81,6 +81,6 @@ static inline int pcibus_to_node(struct 
 #define smt_capable()				(sparc64_multi_core)
 #endif /* CONFIG_SMP */
 
-#define cpu_coregroup_map(cpu)			(cpu_core_map[cpu])
+#define cpu_coregroup_map(cpu)			(&cpu_core_map[cpu])
 
 #endif /* _ASM_SPARC64_TOPOLOGY_H */
diff -r 8615e958e1f7 arch/x86/kernel/smpboot.c
--- a/arch/x86/kernel/smpboot.c	Sat Oct 04 23:52:09 2008 +1000
+++ b/arch/x86/kernel/smpboot.c	Sun Oct 05 00:44:05 2008 +1000
@@ -491,7 +491,7 @@ void __cpuinit set_cpu_sibling_map(int c
 }
 
 /* maps the cpu to the sched domain representing multi-core */
-cpumask_t cpu_coregroup_map(int cpu)
+const struct cpumask *cpu_coregroup_map(int cpu)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	/*
@@ -499,9 +499,9 @@ cpumask_t cpu_coregroup_map(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return per_cpu(cpu_core_map, cpu);
+		return &per_cpu(cpu_core_map, cpu);
 	else
-		return c->llc_shared_map;
+		return &c->llc_shared_map;
 }
 
 static void impress_friends(void)
diff -r 8615e958e1f7 include/asm-x86/topology.h
--- a/include/asm-x86/topology.h	Sat Oct 04 23:52:09 2008 +1000
+++ b/include/asm-x86/topology.h	Sun Oct 05 00:44:05 2008 +1000
@@ -218,7 +218,7 @@ static inline int node_to_first_cpu(int 
 }
 #endif
 
-extern cpumask_t cpu_coregroup_map(int cpu);
+extern const struct cpumask *cpu_coregroup_map(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
diff -r 8615e958e1f7 kernel/sched.c
--- a/kernel/sched.c	Sat Oct 04 23:52:09 2008 +1000
+++ b/kernel/sched.c	Sun Oct 05 00:44:05 2008 +1000
@@ -7030,7 +7030,7 @@ cpu_to_phys_group(int cpu, const cpumask
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
-	*mask = cpu_coregroup_map(cpu);
+	*mask = *cpu_coregroup_map(cpu);
 	cpus_and(*mask, *mask, *cpu_map);
 	group = first_cpu(*mask);
 #elif defined(CONFIG_SCHED_SMT)
@@ -7356,7 +7356,7 @@ static int __build_sched_domains(const c
 		sd = &per_cpu(core_domains, i);
 		SD_INIT(sd, MC);
 		set_domain_attribute(sd, attr);
-		sd->span = cpu_coregroup_map(i);
+		sd->span = *cpu_coregroup_map(i);
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		p->child = sd;
@@ -7393,7 +7393,7 @@ static int __build_sched_domains(const c
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
 	for_each_cpu(i, cpu_map) {
-		*this_core_map = cpu_coregroup_map(i);
+		*this_core_map = *cpu_coregroup_map(i);
 		cpus_and(*this_core_map, *this_core_map, *cpu_map);
 		if (i != first_cpu(*this_core_map))
 			continue;
