cpumask: make cpu_vm_mask a cpumask_var_t

FIXME: grep for remaining users of cpu_vm_mask.

One effect is shrinking struct mm_struct when nr_cpu_ids << NR_CPUS,
but it also moves us a step closer to removing the 'struct cpumask'
definition entirely.
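
(For reference, the two builds of cpumask_var_t, condensed from
include/linux/cpumask.h; callers use the same API either way, but only
the offstack flavour really allocates.  The example() function below is
purely illustrative:)

	#ifdef CONFIG_CPUMASK_OFFSTACK
	typedef struct cpumask *cpumask_var_t;	 /* nr_cpu_ids-bit alloc */
	#else
	typedef struct cpumask cpumask_var_t[1]; /* full NR_CPUS, in place */
	#endif

	static int example(void)
	{
		cpumask_var_t mask;

		/* Compiles to "return true" when !CONFIG_CPUMASK_OFFSTACK. */
		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_copy(mask, cpu_online_mask);
		free_cpumask_var(mask);
		return 0;
	}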

Really, since we (almost) always allocate the mm_struct dynamically, it
would be more efficient to use the dangling-bitmap trick: hang the
bitmap off the end of the structure and size the allocation by
nr_cpu_ids, saving the extra pointer dereference.
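
A sketch of that shape (hypothetical names, not what this patch
implements):

	/* The bitmap dangles off the end; the allocation is sized by
	 * nr_cpu_ids, so small boxes pay for small masks. */
	struct mm_struct_sketch {
		mm_context_t context;			/* other fields elided */
		unsigned long cpu_vm_mask_bits[];	/* must stay last */
	};

	static inline struct cpumask *sketch_cpumask(struct mm_struct_sketch *mm)
	{
		return to_cpumask(mm->cpu_vm_mask_bits);
	}

	/* One kmalloc covers struct and mask; no separate free needed. */
	mm = kmalloc(sizeof(*mm) + cpumask_size(), GFP_KERNEL);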

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/x86/include/asm/mmu_context_32.h |    6 +++---
 arch/x86/kernel/init_task.c           |    3 +++
 arch/x86/kernel/ldt.c                 |    4 ++--
 arch/x86/kernel/tlb_32.c              |    9 +++++----
 arch/x86/kernel/tlb_64.c              |    8 ++++----
 include/linux/init_task.h             |    9 ++++++++-
 include/linux/mm_types.h              |    2 +-
 kernel/fork.c                         |   12 ++++++++++--
 8 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h
--- a/arch/x86/include/asm/mmu_context_32.h
+++ b/arch/x86/include/asm/mmu_context_32.h
@@ -18,12 +18,12 @@ static inline void switch_mm(struct mm_s
 
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
+		cpumask_clear_cpu(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
 		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
 		per_cpu(cpu_tlbstate, cpu).active_mm = next;
 #endif
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, next->cpu_vm_mask);
 
 		/* Re-load page tables */
 		load_cr3(next->pgd);
@@ -39,7 +39,7 @@ static inline void switch_mm(struct mm_s
 		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
 		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
 
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+		if (!cpumask_test_and_set_cpu(cpu, next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload %cr3.
 			 */
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -10,6 +10,9 @@
 #include <asm/pgtable.h>
 #include <asm/desc.h>
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+DECLARE_BITMAP(init_cpu_vm_mask, NR_CPUS) = CPU_BITS_ALL;
+#endif
 static struct fs_struct init_fs = INIT_FS;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
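
(Why a static bitmap here: init_mm is in use long before the slab
allocator can back alloc_cpumask_var(), so the offstack build needs a
compile-time NR_CPUS bitmap, viewed as a struct cpumask via
to_cpumask().  The same pattern in isolation, hypothetical name:)

	static DECLARE_BITMAP(boot_mask_bits, NR_CPUS) = CPU_BITS_ALL;
	static struct cpumask *const boot_mask = to_cpumask(boot_mask_bits);
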
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, i
 #ifdef CONFIG_SMP
 		preempt_disable();
 		load_LDT(pc);
-		if (!cpus_equal(current->mm->cpu_vm_mask,
-				cpumask_of_cpu(smp_processor_id())))
+		if (!cpumask_equal(current->mm->cpu_vm_mask,
+				   cpumask_of(smp_processor_id())))
 			smp_call_function(flush_ldt, current->mm, 1);
 		preempt_enable();
 #else
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -36,7 +36,8 @@ void leave_mm(int cpu)
 {
 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
+	cpumask_clear_cpu(cpu,
+			  per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -181,7 +182,7 @@ void flush_tlb_current_task(void)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm->cpu_vm_mask;
 	cpu_clear(smp_processor_id(), cpu_mask);
 
 	local_flush_tlb();
@@ -195,7 +196,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm->cpu_vm_mask;
 	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
@@ -216,7 +217,7 @@ void flush_tlb_page(struct vm_area_struc
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm->cpu_vm_mask;
 	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -64,7 +64,7 @@ void leave_mm(int cpu)
 {
 	if (read_pda(mmu_state) == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
+	cpumask_clear_cpu(cpu, read_pda(active_mm)->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -218,7 +218,7 @@ void flush_tlb_current_task(void)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm->cpu_vm_mask;
 	cpu_clear(smp_processor_id(), cpu_mask);
 
 	local_flush_tlb();
@@ -232,7 +232,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm->cpu_vm_mask;
 	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
@@ -253,7 +253,7 @@ void flush_tlb_page(struct vm_area_struc
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm->cpu_vm_mask;
 	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
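
(Follow-up material: flush_tlb_current_task(), flush_tlb_mm() and
flush_tlb_page() in both tlb_32.c and tlb_64.c still copy the mask into
an on-stack cpumask_t, which is exactly the big-NR_CPUS stack object
CONFIG_CPUMASK_OFFSTACK exists to avoid.  A sketch of how flush_tlb_mm()
could drop the copy, assuming flush_tlb_others() were converted to take
a const struct cpumask * instead of a cpumask_t by value:)

	void flush_tlb_mm(struct mm_struct *mm)
	{
		preempt_disable();

		if (current->active_mm == mm) {
			if (current->mm)
				local_flush_tlb();
			else
				leave_mm(smp_processor_id());
		}
		/* IPI any other CPU that still has this mm loaded. */
		if (cpumask_any_but(mm->cpu_vm_mask, smp_processor_id())
		    < nr_cpu_ids)
			flush_tlb_others(mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);

		preempt_enable();
	}
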
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -26,6 +26,13 @@ extern struct files_struct init_files;
 	.max_reqs	= ~0U,				\
 }
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+extern DECLARE_BITMAP(init_cpu_vm_mask, NR_CPUS);
+#define INIT_CPU_VM_MASK to_cpumask(init_cpu_vm_mask)
+#else
+#define INIT_CPU_VM_MASK { { CPU_BITS_ALL } }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
 #define INIT_MM(name) \
 {			 					\
 	.mm_rb		= RB_ROOT,				\
@@ -35,7 +42,7 @@ extern struct files_struct init_files;
 	.mmap_sem	= __RWSEM_INITIALIZER(name.mmap_sem),	\
 	.page_table_lock =  __SPIN_LOCK_UNLOCKED(name.page_table_lock),	\
 	.mmlist		= LIST_HEAD_INIT(name.mmlist),		\
-	.cpu_vm_mask	= CPU_MASK_ALL,				\
+	.cpu_vm_mask	= INIT_CPU_VM_MASK,			\
 }
 
 #define INIT_SIGNALS(sig) {						\
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -211,7 +211,7 @@ struct mm_struct {
 
 	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
-	cpumask_t cpu_vm_mask;
+	cpumask_var_t cpu_vm_mask;
 
 	/* Architecture-specific MM context */
 	mm_context_t context;
diff --git a/kernel/fork.c b/kernel/fork.c
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -263,6 +263,11 @@ static int dup_mmap(struct mm_struct *mm
 	unsigned long charge;
 	struct mempolicy *pol;
 
+	/* If cpumask_var_t is an array, dup_mm()'s memcpy already copied it. */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	cpumask_copy(mm->cpu_vm_mask, oldmm->cpu_vm_mask);
+#endif
+
 	down_write(&oldmm->mmap_sem);
 	flush_cache_dup_mm(oldmm);
 	/*
@@ -276,7 +281,6 @@ static int dup_mmap(struct mm_struct *mm
 	mm->free_area_cache = oldmm->mmap_base;
 	mm->cached_hole_size = ~0UL;
 	mm->map_count = 0;
-	cpus_clear(mm->cpu_vm_mask);
 	mm->mm_rb = RB_ROOT;
 	rb_link = &mm->mm_rb.rb_node;
 	rb_parent = NULL;
@@ -418,12 +422,16 @@ static struct mm_struct * mm_init(struct
 	mm->cached_hole_size = ~0UL;
 	mm_init_owner(mm, p);
 
+	if (!alloc_cpumask_var(&mm->cpu_vm_mask, GFP_KERNEL))
+		goto fail_nomem;
+
 	if (likely(!mm_alloc_pgd(mm))) {
 		mm->def_flags = 0;
 		mmu_notifier_mm_init(mm);
 		return mm;
 	}
-
+	free_cpumask_var(mm->cpu_vm_mask);
+fail_nomem:
 	free_mm(mm);
 	return NULL;
 }
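
(One gap to double-check, possibly what the FIXME alludes to: the error
path above frees the mask, but in the offstack case the normal teardown
path needs a matching free_cpumask_var() too, or every exited mm leaks
one cpumask allocation.  Roughly, __mmdrop() in kernel/fork.c would
gain -- a sketch, not part of the diff above:)

	void __mmdrop(struct mm_struct *mm)
	{
		BUG_ON(mm == &init_mm);
		mm_free_pgd(mm);
		destroy_context(mm);
		mmu_notifier_mm_destroy(mm);
		free_cpumask_var(mm->cpu_vm_mask);	/* matches mm_init() */
		free_mm(mm);
	}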
