alloc_percpu: Use __get_cpu_ptr / get_cpu_ptr / get_cpu_var in kernel

Impact: slight efficiency improvement on some archs.

Let's use __get_cpu_ptr, get_cpu_ptr, get_cpu_var and __raw_get_cpu_ptr
now, rather than per_cpu_ptr(..., smp_processor_id()) and
per_cpu_ptr(..., raw_smp_processor_id()).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Dipankar Sarma <dipankar@in.ibm.com>
---
 kernel/posix-cpu-timers.c |    2 +-
 kernel/sched.c            |    5 ++---
 kernel/sched_stats.h      |    4 ++--
 kernel/srcu.c             |    4 ++--
 kernel/workqueue.c        |    4 ++--
 5 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -37,7 +37,7 @@ int thread_group_cputime_alloc(struct ta
 		return 0;
 	}
 	sig->cputime.totals = cputime;
-	cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
+	cputime = __get_cpu_ptr(sig->cputime.totals);
 	cputime->utime = tsk->utime;
 	cputime->stime = tsk->stime;
 	cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -683,12 +683,11 @@ static inline void update_rq_clock(struc
  */
 int runqueue_is_locked(void)
 {
-	int cpu = get_cpu();
-	struct rq *rq = cpu_rq(cpu);
+	struct rq *rq = &get_cpu_var(runqueues);
 	int ret;
 
 	ret = spin_is_locked(&rq->lock);
-	put_cpu();
+	put_cpu_var(runqueues);
 	return ret;
 }
 
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -305,7 +305,7 @@ static inline void account_group_user_ti
 	if (sig->cputime.totals) {
 		struct task_cputime *times;
 
-		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+		times = get_cpu_ptr(sig->cputime.totals);
 		times->utime = cputime_add(times->utime, cputime);
 		put_cpu_no_resched();
 	}
@@ -364,7 +364,7 @@ static inline void account_group_exec_ru
 	if (sig->cputime.totals) {
 		struct task_cputime *times;
 
-		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+		times = get_cpu_ptr(sig->cputime.totals);
 		times->sum_exec_runtime += ns;
 		put_cpu_no_resched();
 	}
diff --git a/kernel/srcu.c b/kernel/srcu.c
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -113,7 +113,7 @@ int srcu_read_lock(struct srcu_struct *s
 	preempt_disable();
 	idx = sp->completed & 0x1;
 	barrier();  /* ensure compiler looks -once- at sp->completed. */
-	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]++;
+	__get_cpu_ptr(sp->per_cpu_ref)->c[idx]++;
 	srcu_barrier();  /* ensure compiler won't misorder critical section. */
 	preempt_enable();
 	return idx;
@@ -133,7 +133,7 @@ void srcu_read_unlock(struct srcu_struct
 {
 	preempt_disable();
 	srcu_barrier();  /* ensure compiler won't misorder critical section. */
-	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
+	__get_cpu_ptr(sp->per_cpu_ref)->c[idx]--;
 	preempt_enable();
 }
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -739,12 +739,12 @@ int current_is_keventd(void)
 int current_is_keventd(void)
 {
 	struct cpu_workqueue_struct *cwq;
-	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
 	int ret = 0;
 
 	BUG_ON(!keventd_wq);
 
-	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
+	/* preempt-safe: keventd is per-cpu */
+	cwq = __raw_get_cpu_ptr(keventd_wq->cpu_wq);
 	if (current == cwq->thread)
 		ret = 1;
 
