From b57fc9e80692043e2a3a74e1d2c047eb700dcd0c Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 28 Feb 2014 16:12:25 +0000
Subject: arm64: Fix !CONFIG_SMP kernel build

Commit fb4a96029c8a (arm64: kernel: fix per-cpu offset restore on resume)
uses per_cpu_offset() unconditionally during CPU wakeup; however, this is
only defined for the SMP case.

Signed-off-by: Catalin Marinas
Reported-by: Dave P Martin
---
 arch/arm64/include/asm/percpu.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 13fb0b3efc5f..453a179469a3 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#ifdef CONFIG_SMP
+
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void)
 }
 #define __my_cpu_offset __my_cpu_offset()
 
+#else	/* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x)	do { } while (0)
+
+#endif /* CONFIG_SMP */
+
 #include <asm-generic/percpu.h>
 
 #endif /* __ASM_PERCPU_H */
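
Why the !CONFIG_SMP stub is a macro rather than an empty inline function:
the CPU wakeup code mentioned above restores the per-CPU offset with a call
along the lines of set_my_cpu_offset(per_cpu_offset(smp_processor_id())),
and per_cpu_offset() simply does not exist on UP kernels. Because the macro
body never uses its argument, the preprocessor discards the whole
per_cpu_offset() expression, so the UP build never has to compile it. The
sketch below is a minimal, self-contained illustration of that behaviour;
it is not kernel code, and CONFIG_SMP, my_offset and the per_cpu_offset()
stub in it are stand-ins invented for the example.

/* percpu_stub_demo.c -- illustration only, not kernel code. */

#ifdef CONFIG_SMP

static unsigned long my_offset;		/* stand-in for TPIDR_EL1 */

static inline void set_my_cpu_offset(unsigned long off)
{
	my_offset = off;		/* kernel: msr tpidr_el1, off */
}

/* Hypothetical stub; the real helper comes from the per-CPU core code. */
#define per_cpu_offset(cpu)	((unsigned long)(cpu) * 0x1000UL)

#else  /* !CONFIG_SMP */

/*
 * Same shape as the patch above: the argument is never used, so whatever
 * expression the caller passes is dropped by the preprocessor and does not
 * need to compile on UP builds.
 */
#define set_my_cpu_offset(x)	do { } while (0)

#endif /* CONFIG_SMP */

int main(void)
{
	/*
	 * Mirrors the wakeup-path call: with CONFIG_SMP the offset is
	 * stored; without it the whole argument, including the undeclared
	 * per_cpu_offset(), vanishes at preprocessing time.
	 */
	set_my_cpu_offset(per_cpu_offset(0));
	return 0;
}

Building the sketch both ways (e.g. "gcc percpu_stub_demo.c" and
"gcc -DCONFIG_SMP percpu_stub_demo.c") succeeds. Had the UP stub been an
empty static inline function instead, the argument would still have to be a
valid expression, and the undeclared per_cpu_offset() would break the
!CONFIG_SMP build.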