cpumask: Use smp_call_function_many()

Change smp_call_function_mask() callers to smp_call_function_many(), which
takes a const struct cpumask * rather than a cpumask_t by value.  This avoids
copying (potentially huge) cpumasks on the stack and prepares for the removal
of smp_call_function_mask().  On-stack cpumask_t temporaries become
cpumask_var_t allocations; where the allocation can fail (Xen's drop_mm_ref,
KVM's remote-TLB-flush and MMU-reload paths) a slower fallback path is added
that notifies every relevant CPU individually or broadcasts to all online CPUs.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/arm/kernel/smp.c     |   23 ++++++++---------------
 arch/sparc64/kernel/smp.c |    2 +-
 arch/x86/xen/mmu.c        |   19 ++++++++++++++-----
 virt/kvm/kvm_main.c       |   45 +++++++++++++++++++++++++++++++++++----------
 4 files changed, 58 insertions(+), 31 deletions(-)

diff -r e1964287efb7 arch/arm/kernel/smp.c
--- a/arch/arm/kernel/smp.c	Mon Sep 29 13:53:28 2008 +1000
+++ b/arch/arm/kernel/smp.c	Mon Sep 29 14:31:41 2008 +1000
@@ -525,20 +525,17 @@ int setup_profiling_timer(unsigned int m
 	return -EINVAL;
 }
 
-static int
-on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
+static void
+on_each_cpu_mask(void (*func)(void *), void *info, int wait,
+		 const struct cpumask *mask)
 {
-	int ret = 0;
-
 	preempt_disable();
 
-	ret = smp_call_function_mask(mask, func, info, wait);
-	if (cpu_isset(smp_processor_id(), mask))
+	smp_call_function_many(mask, func, info, wait);
+	if (cpumask_test_cpu(smp_processor_id(), mask))
 		func(info);
 
 	preempt_enable();
-
-	return ret;
 }
 
 /**********************************************************************/
@@ -599,20 +596,17 @@ void flush_tlb_all(void)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	cpumask_t mask = mm->cpu_vm_mask;
-
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
 	struct tlb_args ta;
 
 	ta.ta_vma = vma;
 	ta.ta_start = uaddr;
 
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -627,14 +621,13 @@ void flush_tlb_range(struct vm_area_stru
 void flush_tlb_range(struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
 {
-	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
 	struct tlb_args ta;
 
 	ta.ta_vma = vma;
 	ta.ta_start = start;
 	ta.ta_end = end;
 
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
diff -r e1964287efb7 arch/sparc64/kernel/smp.c
--- a/arch/sparc64/kernel/smp.c	Mon Sep 29 13:53:28 2008 +1000
+++ b/arch/sparc64/kernel/smp.c	Mon Sep 29 14:31:41 2008 +1000
@@ -848,7 +848,7 @@ static void tsb_sync(void *info)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-	smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
+	smp_call_function_many(&mm->cpu_vm_mask, tsb_sync, mm, 1);
 }
 
 extern unsigned long xcall_flush_tlb_mm;
diff -r e1964287efb7 arch/x86/xen/mmu.c
--- a/arch/x86/xen/mmu.c	Mon Sep 29 13:53:28 2008 +1000
+++ b/arch/x86/xen/mmu.c	Mon Sep 29 14:31:41 2008 +1000
@@ -909,7 +909,7 @@ static void drop_other_mm_ref(void *info
 
 static void drop_mm_ref(struct mm_struct *mm)
 {
-	cpumask_t mask;
+	cpumask_var_t mask;
 	unsigned cpu;
 
 	if (current->active_mm == mm) {
@@ -921,7 +921,15 @@ static void drop_mm_ref(struct mm_struct
 	}
 
 	/* Get the "official" set of cpus referring to our pagetable. */
-	mask = mm->cpu_vm_mask;
+	if (!alloc_cpumask_var_copy(&mask, &mm->cpu_vm_mask, GFP_ATOMIC)) {
+		for_each_online_cpu(cpu) {
+			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
+				continue;
+			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
+		}
+		return;
+	}
 
 	/* It's possible that a vcpu may have a stale reference to our
 	   cr3, because its in lazy mode, and it hasn't yet flushed
@@ -930,11 +938,12 @@ static void drop_mm_ref(struct mm_struct
 	   if needed. */
 	for_each_online_cpu(cpu) {
 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
-			cpu_set(cpu, mask);
+			cpumask_set_cpu(cpu, mask);
 	}
 
-	if (!cpus_empty(mask))
-		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
+	if (!cpumask_empty(mask))
+		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
+	free_cpumask_var(mask);
 }
 #else
 static void drop_mm_ref(struct mm_struct *mm)
diff -r e1964287efb7 virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c	Mon Sep 29 13:53:28 2008 +1000
+++ b/virt/kvm/kvm_main.c	Mon Sep 29 14:31:41 2008 +1000
@@ -106,11 +106,23 @@ void kvm_flush_remote_tlbs(struct kvm *k
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
 	int i, cpu, me;
-	cpumask_t cpus;
+	cpumask_var_t cpus;
 	struct kvm_vcpu *vcpu;
 
 	me = get_cpu();
-	cpus_clear(cpus);
+	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
+		/* Slow path on failure.  Call everyone. */
+		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+			vcpu = kvm->vcpus[i];
+			if (vcpu)
+				set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
+		}
+		++kvm->stat.remote_tlb_flush;
+		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
+		put_cpu();
+		return;
+	}
+
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		vcpu = kvm->vcpus[i];
 		if (!vcpu)
@@ -119,24 +131,36 @@ void kvm_flush_remote_tlbs(struct kvm *k
 			continue;
 		cpu = vcpu->cpu;
 		if (cpu != -1 && cpu != me)
-			cpu_set(cpu, cpus);
+			cpumask_set_cpu(cpu, cpus);
 	}
-	if (cpus_empty(cpus))
+	if (cpumask_empty(cpus))
 		goto out;
 	++kvm->stat.remote_tlb_flush;
-	smp_call_function_mask(cpus, ack_flush, NULL, 1);
+	smp_call_function_many(cpus, ack_flush, NULL, 1);
 out:
 	put_cpu();
+	free_cpumask_var(cpus);
 }
 
 void kvm_reload_remote_mmus(struct kvm *kvm)
 {
 	int i, cpu, me;
-	cpumask_t cpus;
+	cpumask_var_t cpus;
 	struct kvm_vcpu *vcpu;
 
 	me = get_cpu();
-	cpus_clear(cpus);
+	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
+		/* Slow path on failure.  Call everyone. */
+		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+			vcpu = kvm->vcpus[i];
+			if (vcpu)
+				set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
+		}
+		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
+		put_cpu();
+		return;
+	}
+
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		vcpu = kvm->vcpus[i];
 		if (!vcpu)
@@ -145,13 +169,14 @@ void kvm_reload_remote_mmus(struct kvm *
 			continue;
 		cpu = vcpu->cpu;
 		if (cpu != -1 && cpu != me)
-			cpu_set(cpu, cpus);
+			cpumask_set_cpu(cpu, cpus);
 	}
-	if (cpus_empty(cpus))
+	if (cpumask_empty(cpus))
 		goto out;
-	smp_call_function_mask(cpus, ack_flush, NULL, 1);
+	smp_call_function_many(cpus, ack_flush, NULL, 1);
 out:
 	put_cpu();
+	free_cpumask_var(cpus);
 }
 
 
