cpumask: use mm_cpumask() wrapper: mn10300

Makes the code future-proof against the impending change to mm->cpu_vm_mask.

It's also a chance to use the new cpumask_* operations, which take a
struct cpumask pointer (the older cpu_* ones are deprecated, but there's
no hurry for arch code).
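
For context, mm_cpumask() is (at the time of writing) a trivial accessor
in include/linux/mm_types.h, so the conversion is purely mechanical.  A
rough before/after sketch of the idiom, illustrative only and not part
of the diff below:

	/* Future-safe accessor for struct mm_struct's cpu_vm_mask
	 * (roughly as defined in include/linux/mm_types.h). */
	#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)

	/* Old style: cpu_* ops operating on the field directly. */
	cpu_set(cpu, mm->cpu_vm_mask);
	cpu_test_and_set(cpu, mm->cpu_vm_mask);

	/* New style: cpumask_* ops on a struct cpumask pointer,
	 * reached through the wrapper. */
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));

Once cpu_vm_mask changes representation, only the mm_cpumask()
definition needs updating, not every arch's callers.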

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 include/asm-mn10300/mmu_context.h |   12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/asm-mn10300/mmu_context.h b/include/asm-mn10300/mmu_context.h
--- a/include/asm-mn10300/mmu_context.h
+++ b/include/asm-mn10300/mmu_context.h
@@ -38,13 +38,13 @@ extern unsigned long mmu_context_cache[N
 #define enter_lazy_tlb(mm, tsk)	do {} while (0)
 
 #ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, task) \
-	cpu_set((cpu), (task)->cpu_vm_mask)
-#define cpu_maybe_ran_vm(cpu, task) \
-	cpu_test_and_set((cpu), (task)->cpu_vm_mask)
+#define cpu_ran_vm(cpu, mm) \
+	cpumask_set_cpu((cpu), mm_cpumask(mm))
+#define cpu_maybe_ran_vm(cpu, mm) \
+	cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
 #else
-#define cpu_ran_vm(cpu, task)		do {} while (0)
-#define cpu_maybe_ran_vm(cpu, task)	true
+#define cpu_ran_vm(cpu, mm)		do {} while (0)
+#define cpu_maybe_ran_vm(cpu, mm)	true
 #endif /* CONFIG_SMP */
 
 /*
