alloc_percpu: use __percpu annotation for sparse.

Add a __percpu annotation so sparse can catch direct dereferences of
per-cpu pointers: they are only valid once shifted through
per_cpu_ptr() and friends.

We have to define __kernel as "__attribute__((address_space(0)))"
(rather than leaving it empty for the default address space) so that
the accessors can explicitly cast a __percpu pointer back to it.
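
For illustration, a standalone sketch of what sparse sees (this is the
checker's model only, not kernel code):

	/* Sketch, assuming __CHECKER__ is defined (i.e. running sparse): */
	#define __percpu __attribute__((noderef, address_space(3)))
	#define __kernel __attribute__((address_space(0)))
	#define __force  __attribute__((force))

	static int read_it(int __percpu *p)
	{
		/* return *p; -- sparse: dereference of noderef expression */

		/* Cast back to the default address space; __force silences
		 * the address-space-change warning.  Without the explicit
		 * address_space(0) there is no attribute to cast to, which
		 * is why __kernel changes above. */
		return *(int __kernel __force *)p;
	}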

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Al Viro <viro@zeniv.linux.org.uk>
---
 include/asm-generic/percpu.h |   19 ++++++++++++-------
 include/linux/compiler.h     |    4 +++-
 include/linux/percpu.h       |    8 ++++----
 3 files changed, 19 insertions(+), 12 deletions(-)
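
For example (illustrative only: the struct and variable names here are
invented, and error handling is omitted), sparse will now catch direct
dereferences of dynamically allocated per-cpu memory:

	struct counters {
		u64 packets;
	};

	struct counters __percpu *c = alloc_percpu(struct counters);
	int cpu;

	/* c->packets++; -- sparse: dereference of noderef expression */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(c, cpu)->packets = 0;  /* OK: cast back inside */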

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -5,7 +5,7 @@
 
 #ifdef __CHECKER__
 # define __user		__attribute__((noderef, address_space(1)))
-# define __kernel	/* default address space */
+# define __kernel	__attribute__((address_space(0)))
 # define __safe		__attribute__((safe))
 # define __force	__attribute__((force))
 # define __nocast	__attribute__((nocast))
@@ -15,6 +15,7 @@
 # define __acquire(x)	__context__(x,1)
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu	__attribute__((noderef, address_space(3)))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
 #else
@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile 
 # define __acquire(x) (void)0
 # define __release(x) (void)0
 # define __cond_lock(x,c) (c)
+# define __percpu
 #endif
 
 #ifdef __KERNEL__
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -45,7 +45,9 @@ extern unsigned long __per_cpu_offset[NR
  * Only S390 provides its own means of moving the pointer.
  */
 #ifndef SHIFT_PERCPU_PTR
-#define SHIFT_PERCPU_PTR(__p, __offset)	RELOC_HIDE((__p), (__offset))
+/* Weird cast keeps both GCC and sparse happy. */
+#define SHIFT_PERCPU_PTR(__p, __offset)	\
+	((typeof(*(__p)) __kernel __force *)RELOC_HIDE((__p), (__offset)))
 #endif
 
 /*
@@ -72,16 +74,19 @@ extern unsigned long __per_cpu_offset[NR
 #endif /* read_percpu_var */
 
 /* Use RELOC_HIDE: some arch's SHIFT_PERCPU_PTR really want an identifier. */
+#define RELOC_PERCPU(addr, off) \
+	((typeof(*(addr)) __kernel __force *)RELOC_HIDE((addr), (off)))
+
 /**
  * per_cpu_ptr - get a pointer to a particular cpu's allocated memory
- * @ptr: the pointer returned from alloc_percpu
+ * @ptr: the pointer returned from alloc_percpu(), or a per-cpu var's address
  * @cpu: the cpu whose memory you want to access
  *
  * Similar to per_cpu(), except for dynamic memory.
  * cpu_possible(@cpu) must be true.
  */
 #define per_cpu_ptr(ptr, cpu) \
-	RELOC_HIDE((ptr), (per_cpu_offset(cpu)))
+	RELOC_PERCPU((ptr), (per_cpu_offset(cpu)))
 
 /**
  * __get_cpu_ptr - get a pointer to this cpu's allocated memory
@@ -89,8 +94,8 @@ extern unsigned long __per_cpu_offset[NR
  *
  * Similar to __get_cpu_var(), except for dynamic memory.
  */
-#define __get_cpu_ptr(ptr) RELOC_HIDE(ptr, my_cpu_offset)
-#define __raw_get_cpu_ptr(ptr) RELOC_HIDE(ptr, __my_cpu_offset)
+#define __get_cpu_ptr(ptr) RELOC_PERCPU(ptr, my_cpu_offset)
+#define __raw_get_cpu_ptr(ptr) RELOC_PERCPU(ptr, __my_cpu_offset)
 
 #ifndef read_percpu_ptr
 /**
@@ -124,7 +129,7 @@ extern void setup_per_cpu_areas(void);
 #define PER_CPU_ATTRIBUTES
 #endif
 
-#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
-					__typeof__(type) per_cpu_var(name)
+#define DECLARE_PER_CPU(type, name) \
+	extern PER_CPU_ATTRIBUTES __percpu __typeof__(type) per_cpu_var(name)
 
 #endif /* _ASM_GENERIC_PERCPU_H_ */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -11,7 +11,7 @@
 #ifdef CONFIG_SMP
 #define DEFINE_PER_CPU(type, name)					\
 	__attribute__((__section__(".data.percpu")))			\
-	PER_CPU_ATTRIBUTES __typeof__(type) name
+	PER_CPU_ATTRIBUTES __typeof__(type) __percpu name
 
 #ifdef MODULE
 #define SHARED_ALIGNED_SECTION ".data.percpu"
@@ -21,15 +21,15 @@
 
 #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
 	__attribute__((__section__(SHARED_ALIGNED_SECTION)))		\
-	PER_CPU_ATTRIBUTES __typeof__(type) name			\
+	PER_CPU_ATTRIBUTES __typeof__(type) __percpu name		\
 	____cacheline_aligned_in_smp
 
 #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)			\
 	__attribute__((__section__(".data.percpu.page_aligned")))	\
-	PER_CPU_ATTRIBUTES __typeof__(type) name
+	PER_CPU_ATTRIBUTES __typeof__(type) __percpu name
 #else
 #define DEFINE_PER_CPU(type, name)					\
-	PER_CPU_ATTRIBUTES __typeof__(type) name
+	PER_CPU_ATTRIBUTES __typeof__(type) __percpu name
 
 #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		      \
 	DEFINE_PER_CPU(type, name)

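Note that DECLARE_PER_CPU and DEFINE_PER_CPU must both carry the
annotation, since sparse checks that a symbol's declaration and
definition agree.  Roughly what the macros expand to (sketch only,
ignoring section placement and PER_CPU_ATTRIBUTES; per_cpu_var(foo)
becomes per_cpu__foo):

	/* DECLARE_PER_CPU(int, foo) in a header: */
	extern int __percpu per_cpu__foo;

	/* DEFINE_PER_CPU(int, foo) in a .c file; without __percpu here,
	 * sparse would warn that the symbol was redeclared with a
	 * different type: */
	int __percpu per_cpu__foo;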