author    | Vineet Gupta <vgupta@synopsys.com> | 2020-08-25 18:37:17 -0700
committer | Vineet Gupta <vgupta@synopsys.com> | 2020-10-05 21:02:29 -0700
commit    | dd7c7ab01a04d645b7e7baa8530bfd81e31a2202 (patch)
tree      | 0560a3d6a1e267761641b89bab123c142a4019ec /arch/arc/include
parent    | 549738f15da0e5a00275977623be199fbbf7df50 (diff)
ARC: [plat-eznps]: Drop support for EZChip NPS platform
NPS customers are no longer doing active development, as is evident from
the randconfig build failures reported in recent times, so drop support
for the NPS platform.
Tested-by: kernel test robot <lkp@intel.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
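
The randconfig failures referenced above are ordinarily reproduced with a
cross-compiled randconfig build of the ARC tree; a minimal sketch is shown
below (the arc-linux- toolchain prefix and the parallel job count are
assumptions, not something recorded in this commit):

    # Sketch: generate a random ARC configuration and build it.
    # CROSS_COMPILE prefix is an assumption; use whichever ARC toolchain is installed.
    make ARCH=arc CROSS_COMPILE=arc-linux- randconfig
    make ARCH=arc CROSS_COMPILE=arc-linux- -j"$(nproc)"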
Diffstat (limited to 'arch/arc/include')
-rw-r--r-- | arch/arc/include/asm/atomic.h        | 104
-rw-r--r-- | arch/arc/include/asm/barrier.h       |   9
-rw-r--r-- | arch/arc/include/asm/bitops.h        |  58
-rw-r--r-- | arch/arc/include/asm/cmpxchg.h       |  70
-rw-r--r-- | arch/arc/include/asm/entry-compact.h |  27
-rw-r--r-- | arch/arc/include/asm/processor.h     |  37
-rw-r--r-- | arch/arc/include/asm/ptrace.h        |   5
-rw-r--r-- | arch/arc/include/asm/setup.h         |   4
-rw-r--r-- | arch/arc/include/asm/spinlock.h      |   6
-rw-r--r-- | arch/arc/include/asm/switch_to.h     |   9
10 files changed, 5 insertions, 324 deletions
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index c614857eb209..fa679b7396d2 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -14,8 +14,6 @@
 #include <asm/barrier.h>
 #include <asm/smp.h>
 
-#ifndef CONFIG_ARC_PLAT_EZNPS
-
 #define atomic_read(v)	READ_ONCE((v)->counter)
 
 #ifdef CONFIG_ARC_HAS_LLSC
@@ -195,108 +193,6 @@ ATOMIC_OPS(andnot, &= ~, bic)
 ATOMIC_OPS(or, |=, or)
 ATOMIC_OPS(xor, ^=, xor)
 
-#else	/* CONFIG_ARC_PLAT_EZNPS */
-
-static inline int atomic_read(const atomic_t *v)
-{
-	int temp;
-
-	__asm__ __volatile__(
-	"	ld.di %0, [%1]"
-	: "=r"(temp)
-	: "r"(&v->counter)
-	: "memory");
-	return temp;
-}
-
-static inline void atomic_set(atomic_t *v, int i)
-{
-	__asm__ __volatile__(
-	"	st.di %0,[%1]"
-	:
-	: "r"(i), "r"(&v->counter)
-	: "memory");
-}
-
-#define ATOMIC_OP(op, c_op, asm_op)					\
-static inline void atomic_##op(int i, atomic_t *v)			\
-{									\
-	__asm__ __volatile__(						\
-	"	mov r2, %0\n"						\
-	"	mov r3, %1\n"						\
-	"	.word %2\n"						\
-	:								\
-	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
-	: "r2", "r3", "memory");					\
-}									\
-
-#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
-{									\
-	unsigned int temp = i;						\
-									\
-	/* Explicit full memory barrier needed before/after */		\
-	smp_mb();							\
-									\
-	__asm__ __volatile__(						\
-	"	mov r2, %0\n"						\
-	"	mov r3, %1\n"						\
-	"	.word %2\n"						\
-	"	mov %0, r2"						\
-	: "+r"(temp)							\
-	: "r"(&v->counter), "i"(asm_op)					\
-	: "r2", "r3", "memory");					\
-									\
-	smp_mb();							\
-									\
-	temp c_op i;							\
-									\
-	return temp;							\
-}
-
-#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
-static inline int atomic_fetch_##op(int i, atomic_t *v)		\
-{									\
-	unsigned int temp = i;						\
-									\
-	/* Explicit full memory barrier needed before/after */		\
-	smp_mb();							\
-									\
-	__asm__ __volatile__(						\
-	"	mov r2, %0\n"						\
-	"	mov r3, %1\n"						\
-	"	.word %2\n"						\
-	"	mov %0, r2"						\
-	: "+r"(temp)							\
-	: "r"(&v->counter), "i"(asm_op)					\
-	: "r2", "r3", "memory");					\
-									\
-	smp_mb();							\
-									\
-	return temp;							\
-}
-
-#define ATOMIC_OPS(op, c_op, asm_op)					\
-	ATOMIC_OP(op, c_op, asm_op)					\
-	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-	ATOMIC_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
-#define atomic_sub(i, v) atomic_add(-(i), (v))
-#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
-#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))
-
-#undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op)					\
-	ATOMIC_OP(op, c_op, asm_op)					\
-	ATOMIC_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
-ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
-ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
-
-#endif	/* CONFIG_ARC_PLAT_EZNPS */
-
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
index 7823811e7cf5..4637de9e02fa 100644
--- a/arch/arc/include/asm/barrier.h
+++ b/arch/arc/include/asm/barrier.h
@@ -27,7 +27,7 @@
 #define rmb()	asm volatile("dmb 1\n" : : : "memory")
 #define wmb()	asm volatile("dmb 2\n" : : : "memory")
 
-#elif !defined(CONFIG_ARC_PLAT_EZNPS) /* CONFIG_ISA_ARCOMPACT */
+#else
 
 /*
  * ARCompact based cores (ARC700) only have SYNC instruction which is super
@@ -37,13 +37,6 @@
 
 #define mb()	asm volatile("sync\n" : : : "memory")
 
-#else	/* CONFIG_ARC_PLAT_EZNPS */
-
-#include <plat/ctop.h>
-
-#define mb()	asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
-#define rmb()	asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RD) : "memory")
-
 #endif
 
 #include <asm-generic/barrier.h>
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 50eb3f64a77c..c6606f4d20d6 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -85,7 +85,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
 	return (old & (1 << nr)) != 0;				\
 }
 
-#elif !defined(CONFIG_ARC_PLAT_EZNPS)
+#else	/* !CONFIG_ARC_HAS_LLSC */
 
 /*
  * Non hardware assisted Atomic-R-M-W
@@ -136,55 +136,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
 	return (old & (1UL << (nr & 0x1f))) != 0;		\
 }
 
-#else	/* CONFIG_ARC_PLAT_EZNPS */
-
-#define BIT_OP(op, c_op, asm_op)					\
-static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
-{									\
-	m += nr >> 5;							\
-									\
-	nr = (1UL << (nr & 0x1f));					\
-	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
-		nr = ~nr;						\
-									\
-	__asm__ __volatile__(						\
-	"	mov r2, %0\n"						\
-	"	mov r3, %1\n"						\
-	"	.word %2\n"						\
-	:								\
-	: "r"(nr), "r"(m), "i"(asm_op)					\
-	: "r2", "r3", "memory");					\
-}
-
-#define TEST_N_BIT_OP(op, c_op, asm_op)					\
-static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
-{									\
-	unsigned long old;						\
-									\
-	m += nr >> 5;							\
-									\
-	nr = old = (1UL << (nr & 0x1f));				\
-	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
-		old = ~old;						\
-									\
-	/* Explicit full memory barrier needed before/after */		\
-	smp_mb();							\
-									\
-	__asm__ __volatile__(						\
-	"	mov r2, %0\n"						\
-	"	mov r3, %1\n"						\
-	"	.word %2\n"						\
-	"	mov %0, r2"						\
-	: "+r"(old)							\
-	: "r"(m), "i"(asm_op)						\
-	: "r2", "r3", "memory");					\
-									\
-	smp_mb();							\
-									\
-	return (old & nr) != 0;						\
-}
-
-#endif /* CONFIG_ARC_PLAT_EZNPS */
+#endif
 
 /***************************************
  * Non atomic variants
@@ -226,15 +178,9 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long
 	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
 	__TEST_N_BIT_OP(op, c_op, asm_op)
 
-#ifndef CONFIG_ARC_PLAT_EZNPS
 BIT_OPS(set, |, bset)
 BIT_OPS(clear, & ~, bclr)
 BIT_OPS(change, ^, bxor)
-#else
-BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3)
-BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3)
-BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
-#endif
 
 /*
  * This routine doesn't need to be atomic.
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index c11398160240..cf8233700c00 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -41,7 +41,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 	return prev;
 }
 
-#elif !defined(CONFIG_ARC_PLAT_EZNPS)
+#else /* !CONFIG_ARC_HAS_LLSC */
 
 static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
@@ -61,33 +61,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 	return prev;
 }
 
-#else /* CONFIG_ARC_PLAT_EZNPS */
-
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
-{
-	/*
-	 * Explicit full memory barrier needed before/after
-	 */
-	smp_mb();
-
-	write_aux_reg(CTOP_AUX_GPA1, expected);
-
-	__asm__ __volatile__(
-	"	mov r2, %0\n"
-	"	mov r3, %1\n"
-	"	.word %2\n"
-	"	mov %0, r2"
-	: "+r"(new)
-	: "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
-	: "r2", "r3", "memory");
-
-	smp_mb();
-
-	return new;
-}
-
-#endif /* CONFIG_ARC_HAS_LLSC */
+#endif
 
 #define cmpxchg(ptr, o, n) ({				\
 	(typeof(*(ptr)))__cmpxchg((ptr),		\
@@ -104,8 +78,6 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
-#ifndef CONFIG_ARC_PLAT_EZNPS
-
 /*
  * xchg (reg with memory) based on "Native atomic" EX insn
 */
@@ -168,44 +140,6 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 
 #endif
 
-#else /* CONFIG_ARC_PLAT_EZNPS */
-
-static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
-				   int size)
-{
-	extern unsigned long __xchg_bad_pointer(void);
-
-	switch (size) {
-	case 4:
-		/*
-		 * Explicit full memory barrier needed before/after
-		 */
-		smp_mb();
-
-		__asm__ __volatile__(
-		"	mov r2, %0\n"
-		"	mov r3, %1\n"
-		"	.word %2\n"
-		"	mov %0, r2\n"
-		: "+r"(val)
-		: "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
-		: "r2", "r3", "memory");
-
-		smp_mb();
-
-		return val;
-	}
-	return __xchg_bad_pointer();
-}
-
-#define xchg(ptr, with) ({				\
-	(typeof(*(ptr)))__xchg((unsigned long)(with),	\
-			       (ptr),			\
-			       sizeof(*(ptr)));		\
-})
-
-#endif /* CONFIG_ARC_PLAT_EZNPS */
-
 /*
  * "atomic" variant of xchg()
  * REQ: It needs to follow the same serialization rules as other atomic_xxx()
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index c3aa775878dc..6dbf5cecc8cc 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -33,10 +33,6 @@
 #include <asm/irqflags-compact.h>
 #include <asm/thread_info.h>	/* For THREAD_SIZE */
 
-#ifdef CONFIG_ARC_PLAT_EZNPS
-#include <plat/ctop.h>
-#endif
-
 /*--------------------------------------------------------------
  * Switch to Kernel Mode stack if SP points to User Mode stack
  *
@@ -189,12 +185,6 @@
 	PUSHAX	lp_start
 	PUSHAX	erbta
 
-#ifdef CONFIG_ARC_PLAT_EZNPS
-	.word CTOP_INST_SCHD_RW
-	PUSHAX	CTOP_AUX_GPA1
-	PUSHAX	CTOP_AUX_EFLAGS
-#endif
-
 	lr	r10, [ecr]
 	st	r10, [sp, PT_event]    /* EV_Trap expects r10 to have ECR */
 .endm
@@ -211,11 +201,6 @@
  * by hardware and that is not good.
  *-------------------------------------------------------------*/
 .macro EXCEPTION_EPILOGUE
-#ifdef CONFIG_ARC_PLAT_EZNPS
-	.word CTOP_INST_SCHD_RW
-	POPAX	CTOP_AUX_EFLAGS
-	POPAX	CTOP_AUX_GPA1
-#endif
 
 	POPAX	erbta
 	POPAX	lp_start
@@ -278,11 +263,6 @@
 	PUSHAX	lp_start
 	PUSHAX	bta_l\LVL\()
 
-#ifdef CONFIG_ARC_PLAT_EZNPS
-	.word CTOP_INST_SCHD_RW
-	PUSHAX	CTOP_AUX_GPA1
-	PUSHAX	CTOP_AUX_EFLAGS
-#endif
 .endm
 
 /*--------------------------------------------------------------
@@ -295,11 +275,6 @@
  * by hardware and that is not good.
  *-------------------------------------------------------------*/
 .macro INTERRUPT_EPILOGUE LVL
-#ifdef CONFIG_ARC_PLAT_EZNPS
-	.word CTOP_INST_SCHD_RW
-	POPAX	CTOP_AUX_EFLAGS
-	POPAX	CTOP_AUX_GPA1
-#endif
 
 	POPAX	bta_l\LVL\()
 	POPAX	lp_start
@@ -327,13 +302,11 @@
 	bic \reg, sp, (THREAD_SIZE - 1)
 .endm
 
-#ifndef CONFIG_ARC_PLAT_EZNPS
 /* Get CPU-ID of this core */
 .macro  GET_CPU_ID  reg
 	lr  \reg, [identity]
 	lsr \reg, \reg, 8
 	bmsk \reg, \reg, 7
 .endm
-#endif
 
 #endif  /* __ASM_ARC_ENTRY_COMPACT_H */
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 0fcea5bad343..e4031ecd3c8c 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -17,13 +17,6 @@
 #include <asm/dsp.h>
 #include <asm/fpu.h>
 
-#ifdef CONFIG_ARC_PLAT_EZNPS
-struct eznps_dp {
-	unsigned int eflags;
-	unsigned int gpa1;
-};
-#endif
-
 /* Arch specific stuff which needs to be saved per task.
  * However these items are not so important so as to earn a place in
  * struct thread_info
@@ -38,9 +31,6 @@ struct thread_struct {
 #ifdef CONFIG_ARC_FPU_SAVE_RESTORE
 	struct arc_fpu fpu;
 #endif
-#ifdef CONFIG_ARC_PLAT_EZNPS
-	struct eznps_dp dp;
-#endif
 };
 
 #define INIT_THREAD  {				\
@@ -60,17 +50,8 @@ struct task_struct;
  * A lot of busy-wait loops in SMP are based off of non-volatile data otherwise
  * get optimised away by gcc
  */
-#ifndef CONFIG_EZNPS_MTM_EXT
-
 #define cpu_relax()	barrier()
 
-#else
-
-#define cpu_relax()	\
-	__asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
-
-#endif
-
 #define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)
 #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
@@ -118,25 +99,7 @@ extern unsigned int get_wchan(struct task_struct *p);
 
 #define USER_KERNEL_GUTTER    (VMALLOC_START - TASK_SIZE)
 
-#ifdef CONFIG_ARC_PLAT_EZNPS
-/* NPS architecture defines special window of 129M in user address space for
- * special memory areas, when accessing this window the MMU do not use TLB.
- * Instead MMU direct the access to:
- * 0x57f00000:0x57ffffff -- 1M of closely coupled memory (aka CMEM)
- * 0x58000000:0x5fffffff -- 16 huge pages, 8M each, with fixed map (aka FMTs)
- *
- * CMEM - is the fastest memory we got and its size is 16K.
- * FMT - is used to map either to internal/external memory.
- * Internal memory is the second fast memory and its size is 16M
- * External memory is the biggest memory (16G) and also the slowest.
- *
- * STACK_TOP need to be PMD align (21bit) that is why we supply 0x57e00000.
- */
-#define STACK_TOP	0x57e00000
-#else
 #define STACK_TOP	TASK_SIZE
-#endif
-
 #define STACK_TOP_MAX	STACK_TOP
 
 /* This decides where the kernel will search for a free chunk of vm
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 2fdb87addadc..4c3c9be5bd16 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -16,11 +16,6 @@
 
 #ifdef CONFIG_ISA_ARCOMPACT
 struct pt_regs {
-#ifdef CONFIG_ARC_PLAT_EZNPS
-	unsigned long eflags;	/* Extended FLAGS */
-	unsigned long gpa1;	/* General Purpose Aux */
-#endif
-
 
 	/* Real registers */
 	unsigned long bta;	/* bta_l1, bta_l2, erbta */
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 61a97fe70b86..01f85478170d 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -9,11 +9,7 @@
 #include <linux/types.h>
 #include <uapi/asm/setup.h>
 
-#ifdef CONFIG_ARC_PLAT_EZNPS
-#define COMMAND_LINE_SIZE 2048
-#else
 #define COMMAND_LINE_SIZE 256
-#endif
 
 /*
  * Data structure to map a ID to string
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 94bbed88e3fc..192871608925 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -232,15 +232,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 
 	__asm__ __volatile__(
 	"1:	ex  %0, [%1]		\n"
-#ifdef CONFIG_EZNPS_MTM_EXT
-	"	.word %3		\n"
-#endif
 	"	breq  %0, %2, 1b	\n"
 	: "+&r" (val)
 	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
-#ifdef CONFIG_EZNPS_MTM_EXT
-	, "i"(CTOP_INST_SCHD_RW)
-#endif
 	: "memory");
 
 	smp_mb();
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
index 4a3d67989d19..1f85de8288b1 100644
--- a/arch/arc/include/asm/switch_to.h
+++ b/arch/arc/include/asm/switch_to.h
@@ -12,19 +12,10 @@
 #include <asm/dsp-impl.h>
 #include <asm/fpu.h>
 
-#ifdef CONFIG_ARC_PLAT_EZNPS
-extern void dp_save_restore(struct task_struct *p, struct task_struct *n);
-#define ARC_EZNPS_DP_PREV(p, n)	dp_save_restore(p, n)
-#else
-#define ARC_EZNPS_DP_PREV(p, n)
-
-#endif /* !CONFIG_ARC_PLAT_EZNPS */
-
 struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
 
 #define switch_to(prev, next, last)	\
 do {					\
-	ARC_EZNPS_DP_PREV(prev, next);	\
 	dsp_save_restore(prev, next);	\
 	fpu_save_restore(prev, next);	\
 	last = __switch_to(prev, next);\