cpumask: use tsk_cpumask() to access task_struct cpus_allowed.

This adds a layer of indirection for accessing cpus_allowed, so its
underlying representation can be changed later without touching every
user again.

We also take the chance to use modern cpumask_* operators on the
lines changed.
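
tsk_cpumask() is assumed to be a trivial accessor along these lines (a
sketch only; the actual definition is introduced outside this patch):

	/* Sketch: return a pointer to the task's allowed-CPUs mask. */
	static inline struct cpumask *tsk_cpumask(struct task_struct *tsk)
	{
		return &tsk->cpus_allowed;
	}

Once every user goes through the accessor, cpus_allowed can later be
switched to e.g. a separately-allocated cpumask_var_t by updating only
this one definition.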

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/blackfin/kernel/process.c            |    3 ++-
 arch/mips/include/asm/system.h            |    3 ++-
 arch/mips/kernel/traps.c                  |    8 ++++----
 arch/powerpc/kernel/smp.c                 |    2 +-
 arch/powerpc/platforms/cell/spufs/sched.c |    2 +-
 kernel/cpu.c                              |    2 +-
 kernel/cpuset.c                           |    8 ++++----
 kernel/fork.c                             |    4 ++--
 kernel/kthread.c                          |    2 +-
 kernel/sched.c                            |   30 +++++++++++++++---------------
 kernel/sched_cpupri.c                     |    4 ++--
 kernel/sched_fair.c                       |    6 +++---
 kernel/sched_rt.c                         |    6 +++---
 lib/smp_processor_id.c                    |    2 +-
 14 files changed, 42 insertions(+), 40 deletions(-)
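
On the touched lines, the cpumask operator conversions follow the usual
old-API to new-API mappings, all of which take pointers rather than
cpumask_t values:

	cpus_intersects(a, b)  -> cpumask_intersects(&a, &b)
	cpus_and(d, a, b)      -> cpumask_and(&d, &a, &b)
	cpu_isset(cpu, mask)   -> cpumask_test_cpu(cpu, &mask)
	cpumask_of_cpu(cpu)    -> cpumask_of(cpu)
	struct assignment      -> cpumask_copy(dst, src)

The set_cpus_allowed() calls which still take a cpumask_t by value are
left untouched here.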

diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -177,7 +177,8 @@ asmlinkage int bfin_clone(struct pt_regs
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
 	if (current->rt.nr_cpus_allowed == num_possible_cpus()) {
-		current->cpus_allowed = cpumask_of_cpu(smp_processor_id());
+		cpumask_copy(tsk_cpumask(current),
+			     cpumask_of(smp_processor_id()));
 		current->rt.nr_cpus_allowed = 1;
 	}
 #endif
diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
--- a/arch/mips/include/asm/system.h
+++ b/arch/mips/include/asm/system.h
@@ -54,7 +54,8 @@ do {									\
 	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
 	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
 		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
-		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
+		cpumask_copy(tsk_cpumask(prev),				\
+			     &prev->thread.user_cpus_allowed);		\
 	}								\
 	next->thread.emulated_fp = 0;					\
 } while(0)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -844,13 +844,13 @@ static void mt_ase_fp_affinity(void)
 		 * restricted the allowed set to exclude any CPUs with FPUs,
 		 * we'll skip the procedure.
 		 */
-		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+		if (cpumask_intersects(tsk_cpumask(current), &mt_fpu_cpumask)) {
 			cpumask_t tmask;
 
 			current->thread.user_cpus_allowed
-				= current->cpus_allowed;
-			cpus_and(tmask, current->cpus_allowed,
-				mt_fpu_cpumask);
+				= *tsk_cpumask(current);
+			cpumask_and(&tmask, tsk_cpumask(current),
+				&mt_fpu_cpumask);
 			set_cpus_allowed(current, tmask);
 			set_thread_flag(TIF_FPUBOUND);
 		}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -553,7 +553,7 @@ void __init smp_cpus_done(unsigned int m
 	 * init thread may have been "borrowed" by another CPU in the meantime
 	 * se we pin us down to CPU 0 for a short while
 	 */
-	old_mask = current->cpus_allowed;
+	old_mask = *tsk_cpumask(current);
 	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
 	
 	if (smp_ops)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -139,7 +139,7 @@ void __spu_update_sched_info(struct spu_
 	 * runqueue. The context will be rescheduled on the proper node
 	 * if it is timesliced or preempted.
 	 */
-	ctx->cpus_allowed = current->cpus_allowed;
+	cpumask_copy(&ctx->cpus_allowed, tsk_cpumask(current));
 
 	/* Save the current cpu id for spu interrupt routing. */
 	ctx->last_ran = raw_smp_processor_id();
diff --git a/kernel/cpu.c b/kernel/cpu.c
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -220,7 +220,7 @@ static int __ref _cpu_down(unsigned int 
 	}
 
 	/* Ensure that we are not runnable on dying cpu */
-	cpumask_copy(old_allowed, &current->cpus_allowed);
+	cpumask_copy(old_allowed, tsk_cpumask(current));
 	set_cpus_allowed_ptr(current,
 			     cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -868,7 +868,7 @@ static int cpuset_test_cpumask(struct ta
 static int cpuset_test_cpumask(struct task_struct *tsk,
 			       struct cgroup_scanner *scan)
 {
-	return !cpumask_equal(&tsk->cpus_allowed,
+	return !cpumask_equal(tsk_cpumask(tsk),
 			(cgroup_cs(scan->cg))->cpus_allowed);
 }
 
@@ -1361,7 +1361,7 @@ static int cpuset_can_attach(struct cgro
 
 	if (tsk->flags & PF_THREAD_BOUND) {
 		mutex_lock(&callback_mutex);
-		if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed))
+		if (!cpumask_equal(tsk_cpumask(tsk), cs->cpus_allowed))
 			ret = -EINVAL;
 		mutex_unlock(&callback_mutex);
 	}
@@ -2548,10 +2548,10 @@ void cpuset_task_status_allowed(struct s
 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
 {
 	seq_printf(m, "Cpus_allowed:\t");
-	seq_cpumask(m, &task->cpus_allowed);
+	seq_cpumask(m, tsk_cpumask(task));
 	seq_printf(m, "\n");
 	seq_printf(m, "Cpus_allowed_list:\t");
-	seq_cpumask_list(m, &task->cpus_allowed);
+	seq_cpumask_list(m, tsk_cpumask(task));
 	seq_printf(m, "\n");
 	seq_printf(m, "Mems_allowed:\t");
 	seq_nodemask(m, &task->mems_allowed);
diff --git a/kernel/fork.c b/kernel/fork.c
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1209,9 +1209,9 @@ static struct task_struct *copy_process(
 	 * to ensure it is on a valid CPU (and if not, just force it back to
 	 * parent's CPU). This avoids alot of nasty races.
 	 */
-	p->cpus_allowed = current->cpus_allowed;
+	cpumask_copy(tsk_cpumask(p), tsk_cpumask(current));
 	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
-	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
+	if (unlikely(!cpumask_test_cpu(task_cpu(p), tsk_cpumask(p)) ||
 			!cpu_online(task_cpu(p))))
 		set_task_cpu(p, smp_processor_id());
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -179,7 +179,7 @@ void kthread_bind(struct task_struct *k,
 		return;
 	}
 	set_task_cpu(k, cpu);
-	k->cpus_allowed = cpumask_of_cpu(cpu);
+	cpumask_copy(tsk_cpumask(k), cpumask_of(cpu));
 	k->rt.nr_cpus_allowed = 1;
 	k->flags |= PF_THREAD_BOUND;
 }
diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2175,7 +2175,7 @@ find_idlest_group(struct sched_domain *s
 
 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpumask_intersects(sched_group_cpus(group),
-					&p->cpus_allowed))
+					tsk_cpumask(p)))
 			continue;
 
 		local_group = cpumask_test_cpu(this_cpu,
@@ -2223,7 +2223,7 @@ find_idlest_cpu(struct sched_group *grou
 	int i;
 
 	/* Traverse only the allowed CPUs */
-	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
+	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpumask(p)) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -2940,7 +2940,7 @@ static void sched_migrate_task(struct ta
 	struct rq *rq;
 
 	rq = task_rq_lock(p, &flags);
-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
+	if (!cpumask_test_cpu(dest_cpu, tsk_cpumask(p))
 	    || unlikely(!cpu_active(dest_cpu)))
 		goto out;
 
@@ -3006,7 +3006,7 @@ int can_migrate_task(struct task_struct 
 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
 	 * 3) are cache-hot on their current CPU.
 	 */
-	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
+	if (!cpumask_test_cpu(this_cpu, tsk_cpumask(p))) {
 		schedstat_inc(p, se.nr_failed_migrations_affine);
 		return 0;
 	}
@@ -3910,7 +3910,7 @@ redo:
 			 * task on busiest cpu can't be moved to this_cpu
 			 */
 			if (!cpumask_test_cpu(this_cpu,
-					      &busiest->curr->cpus_allowed)) {
+					      tsk_cpumask(busiest->curr))) {
 				spin_unlock_irqrestore(&busiest->lock, flags);
 				all_pinned = 1;
 				goto out_one_pinned;
@@ -4087,7 +4087,7 @@ redo:
 		 * don't kick the migration_thread, if the curr
 		 * task on busiest cpu can't be moved to this_cpu
 		 */
-		if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
+		if (!cpumask_test_cpu(this_cpu, tsk_cpumask(busiest->curr))) {
 			double_unlock_balance(this_rq, busiest);
 			all_pinned = 1;
 			return ld_moved;
@@ -6036,7 +6036,7 @@ long sched_getaffinity(pid_t pid, struct
 	if (retval)
 		goto out_unlock;
 
-	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
+	cpumask_and(mask, tsk_cpumask(p), cpu_online_mask);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
@@ -6414,7 +6414,7 @@ void __cpuinit init_idle(struct task_str
 	idle->se.exec_start = sched_clock();
 
 	idle->prio = idle->normal_prio = MAX_PRIO;
-	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+	cpumask_copy(tsk_cpumask(idle), cpumask_of(cpu));
 	__set_task_cpu(idle, cpu);
 
 	rq->curr = rq->idle = idle;
@@ -6512,7 +6512,7 @@ int set_cpus_allowed_ptr(struct task_str
 	}
 
 	if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
-		     !cpumask_equal(&p->cpus_allowed, new_mask))) {
+		     !cpumask_equal(tsk_cpumask(p), new_mask))) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -6520,7 +6520,7 @@ int set_cpus_allowed_ptr(struct task_str
 	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, new_mask);
 	else {
-		cpumask_copy(&p->cpus_allowed, new_mask);
+		cpumask_copy(tsk_cpumask(p), new_mask);
 		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
 	}
 
@@ -6570,7 +6570,7 @@ static int __migrate_task(struct task_st
 	if (task_cpu(p) != src_cpu)
 		goto done;
 	/* Affinity changed (again). */
-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+	if (!cpumask_test_cpu(dest_cpu, tsk_cpumask(p)))
 		goto fail;
 
 	on_rq = p->se.on_rq;
@@ -6673,18 +6673,18 @@ again:
 again:
 	/* Look for allowed, online CPU in same node. */
 	for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
-		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+		if (cpumask_test_cpu(dest_cpu, tsk_cpumask(p)))
 			goto move;
 
 	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
+	dest_cpu = cpumask_any_and(tsk_cpumask(p), cpu_online_mask);
 	if (dest_cpu < nr_cpu_ids)
 		goto move;
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu >= nr_cpu_ids) {
-		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-		dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
+		cpuset_cpus_allowed_locked(p, tsk_cpumask(p));
+		dest_cpu = cpumask_any_and(cpu_online_mask, tsk_cpumask(p));
 
 		/*
 		 * Don't tell them about moving exiting tasks or
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -78,11 +78,11 @@ int cpupri_find(struct cpupri *cp, struc
 		if (idx >= task_pri)
 			break;
 
-		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+		if (cpumask_any_and(tsk_cpumask(p), vec->mask) >= nr_cpu_ids)
 			continue;
 
 		if (lowest_mask)
-			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+			cpumask_and(lowest_mask, tsk_cpumask(p), vec->mask);
 		return 1;
 	}
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1057,7 +1057,7 @@ static int wake_idle(int cpu, struct tas
 	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
 		idle_cpu(cpu) && idle_cpu(this_cpu) &&
 		p->mm && !(p->flags & PF_KTHREAD) &&
-		cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
+		cpumask_test_cpu(chosen_wakeup_cpu, tsk_cpumask(p)))
 		return chosen_wakeup_cpu;
 
 	/*
@@ -1077,7 +1077,7 @@ static int wake_idle(int cpu, struct tas
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
 			for_each_cpu_and(i, sched_domain_span(sd),
-					 &p->cpus_allowed) {
+					 tsk_cpumask(p)) {
 				if (cpu_active(i) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
@@ -1277,7 +1277,7 @@ static int select_task_rq_fair(struct ta
 		}
 	}
 
-	if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
+	if (unlikely(!cpumask_test_cpu(this_cpu, tsk_cpumask(p))))
 		goto out;
 
 	/*
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1074,7 +1074,7 @@ static int pick_rt_task(struct rq *rq, s
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
+	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpumask(p))) &&
 	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
@@ -1211,7 +1211,7 @@ static struct rq *find_lock_lowest_rq(st
 			 */
 			if (unlikely(task_rq(task) != rq ||
 				     !cpumask_test_cpu(lowest_rq->cpu,
-						       &task->cpus_allowed) ||
+						       tsk_cpumask(task)) ||
 				     task_running(rq, task) ||
 				     !task->se.on_rq)) {
 
@@ -1529,7 +1529,7 @@ static void set_cpus_allowed_rt(struct t
 		update_rt_migration(&rq->rt);
 	}
 
-	cpumask_copy(&p->cpus_allowed, new_mask);
+	cpumask_copy(tsk_cpumask(p), new_mask);
 	p->rt.nr_cpus_allowed = weight;
 }
 
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+	if (cpumask_equal(tsk_cpumask(current), cpumask_of(this_cpu)))
 		goto out;
 
 	/*
