Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/atomic-long.h   | 55
-rw-r--r-- | include/asm-generic/atomic.h        |  4
-rw-r--r-- | include/asm-generic/mutex-dec.h     |  8
-rw-r--r-- | include/asm-generic/mutex-xchg.h    | 10
-rw-r--r-- | include/asm-generic/pgtable.h       | 47
-rw-r--r-- | include/asm-generic/preempt.h       |  2
-rw-r--r-- | include/asm-generic/qrwlock_types.h |  4
-rw-r--r-- | include/asm-generic/rwsem.h         | 21
8 files changed, 100 insertions, 51 deletions
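
Most of the hunks below follow one pattern: a fully ordered atomic operation in a locking fastpath is relaxed to an explicitly ordered _acquire (lock side) or _release (unlock side) variant. That is sufficient because a lock only has to keep the critical section from being reordered before it, and an unlock only has to keep it from being reordered after it. As a rough sketch of that pattern (the demo_* names are hypothetical and not part of this diff; the real fastpaths are in mutex-dec.h, mutex-xchg.h and rwsem.h below):

#include <linux/atomic.h>
#include <linux/types.h>

/* Toy test-and-set lock: 1 means unlocked, 0 means locked. */
static atomic_t demo_count = ATOMIC_INIT(1);

static inline bool demo_trylock(void)
{
        /* ACQUIRE: the critical section cannot be hoisted above the lock. */
        return atomic_cmpxchg_acquire(&demo_count, 1, 0) == 1;
}

static inline void demo_unlock(void)
{
        /* RELEASE: the critical section cannot sink below the unlock. */
        atomic_xchg_release(&demo_count, 1);
}

The two helpers used in this sketch, atomic_cmpxchg_acquire() and atomic_xchg_release(), are the same ones the mutex fastpaths below switch to.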
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index a94cbebbc33d..eb1973bad80b 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -35,7 +35,7 @@ typedef atomic_t atomic_long_t;
 #endif
 
 #define ATOMIC_LONG_READ_OP(mo) \
-static inline long atomic_long_read##mo(atomic_long_t *l) \
+static inline long atomic_long_read##mo(const atomic_long_t *l) \
 { \
         ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
         \
@@ -112,19 +112,23 @@ static inline void atomic_long_dec(atomic_long_t *l)
         ATOMIC_LONG_PFX(_dec)(v);
 }
 
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
-        ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-        ATOMIC_LONG_PFX(_add)(i, v);
+#define ATOMIC_LONG_OP(op) \
+static inline void \
+atomic_long_##op(long i, atomic_long_t *l) \
+{ \
+        ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+        \
+        ATOMIC_LONG_PFX(_##op)(i, v); \
 }
 
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
-        ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+ATOMIC_LONG_OP(add)
+ATOMIC_LONG_OP(sub)
+ATOMIC_LONG_OP(and)
+ATOMIC_LONG_OP(or)
+ATOMIC_LONG_OP(xor)
+ATOMIC_LONG_OP(andnot)
 
-        ATOMIC_LONG_PFX(_sub)(i, v);
-}
+#undef ATOMIC_LONG_OP
 
 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
 {
@@ -154,19 +158,24 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
         return ATOMIC_LONG_PFX(_add_negative)(i, v);
 }
 
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
-        ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-        return (long)ATOMIC_LONG_PFX(_inc_return)(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
-        ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-        return (long)ATOMIC_LONG_PFX(_dec_return)(v);
+#define ATOMIC_LONG_INC_DEC_OP(op, mo) \
+static inline long \
+atomic_long_##op##_return##mo(atomic_long_t *l) \
+{ \
+        ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+        \
+        return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \
 }
+ATOMIC_LONG_INC_DEC_OP(inc,)
+ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
+ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
+ATOMIC_LONG_INC_DEC_OP(inc, _release)
+ATOMIC_LONG_INC_DEC_OP(dec,)
+ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
+ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
+ATOMIC_LONG_INC_DEC_OP(dec, _release)
+
+#undef ATOMIC_LONG_INC_DEC_OP
 
 static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 {
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index d4d7e337fdcb..74f1a3704d7a 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -127,7 +127,7 @@ ATOMIC_OP(xor, ^)
  * Atomically reads the value of @v.
  */
 #ifndef atomic_read
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
 #endif
 
 /**
@@ -137,7 +137,7 @@ ATOMIC_OP(xor, ^)
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
 #include <linux/irqflags.h>
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index d4f9fb4e53df..fd694cfd678a 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -20,7 +20,7 @@
 static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-        if (unlikely(atomic_dec_return(count) < 0))
+        if (unlikely(atomic_dec_return_acquire(count) < 0))
                 fail_fn(count);
 }
 
@@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-        if (unlikely(atomic_dec_return(count) < 0))
+        if (unlikely(atomic_dec_return_acquire(count) < 0))
                 return -1;
         return 0;
 }
@@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-        if (unlikely(atomic_inc_return(count) <= 0))
+        if (unlikely(atomic_inc_return_release(count) <= 0))
                 fail_fn(count);
 }
 
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-        if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+        if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
                 return 1;
         return 0;
 }
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index f169ec064785..a6b4a7bd6ac9 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
                  * to ensure that any waiting tasks are woken up by the
                  * unlock slow path.
                  */
-                if (likely(atomic_xchg(count, -1) != 1))
+                if (likely(atomic_xchg_acquire(count, -1) != 1))
                         fail_fn(count);
 }
 
@@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-        if (unlikely(atomic_xchg(count, 0) != 1))
+        if (unlikely(atomic_xchg_acquire(count, 0) != 1))
                 if (likely(atomic_xchg(count, -1) != 1))
                         return -1;
         return 0;
@@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-        if (unlikely(atomic_xchg(count, 1) != 0))
+        if (unlikely(atomic_xchg_release(count, 1) != 0))
                 fail_fn(count);
 }
 
@@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-        int prev = atomic_xchg(count, 0);
+        int prev = atomic_xchg_acquire(count, 0);
 
         if (unlikely(prev < 0)) {
                 /*
@@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
                  * owner's unlock path needlessly, but that's not a problem
                  * in practice.
                  */
-                prev = atomic_xchg(count, prev);
+                prev = atomic_xchg_acquire(count, prev);
                 if (prev < 0)
                         prev = 0;
         }
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 29c57b2cb344..14b0ff32fb9f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -30,9 +30,19 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma,
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp,
                                  pmd_t entry, int dirty);
+#else
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+                                        unsigned long address, pmd_t *pmdp,
+                                        pmd_t entry, int dirty)
+{
+        BUILD_BUG();
+        return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -64,12 +74,12 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
         set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
         return r;
 }
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+#else
 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                             unsigned long address,
                                             pmd_t *pmdp)
 {
-        BUG();
+        BUILD_BUG();
         return 0;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -81,8 +91,21 @@ int ptep_clear_flush_young(struct vm_area_struct *vma,
                            unsigned long address, pte_t *ptep);
 #endif
 
 #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-int pmdp_clear_flush_young(struct vm_area_struct *vma,
-                           unsigned long address, pmd_t *pmdp);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+                                  unsigned long address, pmd_t *pmdp);
+#else
+/*
+ * Despite relevant to THP only, this API is called from generic rmap code
+ * under PageTransHuge(), hence needs a dummy implementation for !THP
+ */
+static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
+                                         unsigned long address, pmd_t *pmdp)
+{
+        BUILD_BUG();
+        return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -175,11 +198,11 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
         pmd_t old_pmd = *pmdp;
         set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
 }
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+#else
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                       unsigned long address, pmd_t *pmdp)
 {
-        BUG();
+        BUILD_BUG();
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
@@ -248,7 +271,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 {
-        BUG();
+        BUILD_BUG();
         return 0;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -482,6 +505,16 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
         return pmd;
 }
 
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+        return pte;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+        return pmd;
+}
+
 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
 {
         return pte;
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 0bec580a4885..5d8ffa3e6f8c 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -24,7 +24,7 @@ static __always_inline void preempt_count_set(int pc)
  * must be macros to avoid header recursion hell
  */
 #define init_task_preempt_count(p) do { \
-        task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+        task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
 } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index 4d76f24df518..0abc6b6062fb 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -10,12 +10,12 @@
 
 typedef struct qrwlock {
         atomic_t                cnts;
-        arch_spinlock_t         lock;
+        arch_spinlock_t         wait_lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED { \
         .cnts = ATOMIC_INIT(0), \
-        .lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+        .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
 }
 
 #endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index d48bf5a95cc1..d6d5dc98d7da 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -33,7 +33,7 @@
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-        if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
+        if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
                 rwsem_down_read_failed(sem);
 }
 
@@ -42,7 +42,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
         long tmp;
 
         while ((tmp = sem->count) >= 0) {
-                if (tmp == cmpxchg(&sem->count, tmp,
+                if (tmp == cmpxchg_acquire(&sem->count, tmp,
                                    tmp + RWSEM_ACTIVE_READ_BIAS)) {
                         return 1;
                 }
@@ -57,7 +57,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
         long tmp;
 
-        tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+        tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
                                      (atomic_long_t *)&sem->count);
         if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                 rwsem_down_write_failed(sem);
@@ -72,7 +72,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
         long tmp;
 
-        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+        tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
                       RWSEM_ACTIVE_WRITE_BIAS);
         return tmp == RWSEM_UNLOCKED_VALUE;
 }
@@ -84,7 +84,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
         long tmp;
 
-        tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
+        tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
         if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                 rwsem_wake(sem);
 }
@@ -94,7 +94,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-        if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+        if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
                                             (atomic_long_t *)&sem->count) < 0))
                 rwsem_wake(sem);
 }
@@ -114,7 +114,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
         long tmp;
 
-        tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+        /*
+         * When downgrading from exclusive to shared ownership,
+         * anything inside the write-locked region cannot leak
+         * into the read side. In contrast, anything in the
+         * read-locked region is ok to be re-ordered into the
+         * write side. As such, rely on RELEASE semantics.
+         */
+        tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
                                              (atomic_long_t *)&sem->count);
         if (tmp < 0)
                 rwsem_downgrade_wake(sem);
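
For readers following the atomic-long.h hunks: each ATOMIC_LONG_OP() or ATOMIC_LONG_INC_DEC_OP() invocation stamps out a thin wrapper that casts the atomic_long_t to the underlying type and forwards to the corresponding atomic_*()/atomic64_*() operation. As a sketch (not verbatim preprocessor output, and assuming ATOMIC_LONG_PFX(x) pastes to atomic64##x on CONFIG_64BIT kernels, as the unmodified top of atomic-long.h defines it), ATOMIC_LONG_OP(and) and ATOMIC_LONG_INC_DEC_OP(inc, _acquire) expand to roughly:

/* Sketch of what the generators above produce on a 64-bit build. */
static inline void atomic_long_and(long i, atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        atomic64_and(i, v);
}

static inline long atomic_long_inc_return_acquire(atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        return (long)atomic64_inc_return_acquire(v);
}

On 32-bit kernels the same wrappers forward to atomic_and() and atomic_inc_return_acquire() instead, since atomic_long_t is then the plain atomic_t seen in the first hunk's context.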