Diffstat (limited to 'include/linux')
110 files changed, 5541 insertions, 719 deletions
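The bulk of this change is the new generated header include/linux/atomic-arch-fallback.h, which derives every ordering variant of an atomic operation from whichever primitives the architecture provides: an _acquire variant is the _relaxed form followed by an acquire fence, a _release variant is a release fence followed by the _relaxed form, and the fully ordered form brackets the _relaxed form with pre/post full fences. A minimal sketch of that composition pattern in portable C11 (hypothetical demo_* names, not the kernel's API):

	/*
	 * Illustrative only: the acquire/release composition used
	 * throughout atomic-arch-fallback.h, expressed with C11
	 * atomics. demo_* names are hypothetical.
	 */
	#include <stdatomic.h>

	static int demo_fetch_add_acquire(atomic_int *v, int i)
	{
		/* unordered op first, then an acquire fence */
		int ret = atomic_fetch_add_explicit(v, i, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		return ret;
	}

	static int demo_fetch_add_release(atomic_int *v, int i)
	{
		/* release fence first, then the unordered op */
		atomic_thread_fence(memory_order_release);
		return atomic_fetch_add_explicit(v, i, memory_order_relaxed);
	}

	static int demo_fetch_add(atomic_int *v, int i)
	{
		/* fully ordered: fences on both sides */
		int ret;
		atomic_thread_fence(memory_order_seq_cst);
		ret = atomic_fetch_add_explicit(v, i, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		return ret;
	}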
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h new file mode 100644 index 000000000000..bcb6aa27cfa6 --- /dev/null +++ b/include/linux/atomic-arch-fallback.h @@ -0,0 +1,2291 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-fallback.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +#ifndef _LINUX_ATOMIC_FALLBACK_H +#define _LINUX_ATOMIC_FALLBACK_H + +#include <linux/compiler.h> + +#ifndef arch_xchg_relaxed +#define arch_xchg_relaxed arch_xchg +#define arch_xchg_acquire arch_xchg +#define arch_xchg_release arch_xchg +#else /* arch_xchg_relaxed */ + +#ifndef arch_xchg_acquire +#define arch_xchg_acquire(...) \ + __atomic_op_acquire(arch_xchg, __VA_ARGS__) +#endif + +#ifndef arch_xchg_release +#define arch_xchg_release(...) \ + __atomic_op_release(arch_xchg, __VA_ARGS__) +#endif + +#ifndef arch_xchg +#define arch_xchg(...) \ + __atomic_op_fence(arch_xchg, __VA_ARGS__) +#endif + +#endif /* arch_xchg_relaxed */ + +#ifndef arch_cmpxchg_relaxed +#define arch_cmpxchg_relaxed arch_cmpxchg +#define arch_cmpxchg_acquire arch_cmpxchg +#define arch_cmpxchg_release arch_cmpxchg +#else /* arch_cmpxchg_relaxed */ + +#ifndef arch_cmpxchg_acquire +#define arch_cmpxchg_acquire(...) \ + __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg_release +#define arch_cmpxchg_release(...) \ + __atomic_op_release(arch_cmpxchg, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg +#define arch_cmpxchg(...) \ + __atomic_op_fence(arch_cmpxchg, __VA_ARGS__) +#endif + +#endif /* arch_cmpxchg_relaxed */ + +#ifndef arch_cmpxchg64_relaxed +#define arch_cmpxchg64_relaxed arch_cmpxchg64 +#define arch_cmpxchg64_acquire arch_cmpxchg64 +#define arch_cmpxchg64_release arch_cmpxchg64 +#else /* arch_cmpxchg64_relaxed */ + +#ifndef arch_cmpxchg64_acquire +#define arch_cmpxchg64_acquire(...) \ + __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg64_release +#define arch_cmpxchg64_release(...) \ + __atomic_op_release(arch_cmpxchg64, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg64 +#define arch_cmpxchg64(...) 
\ + __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__) +#endif + +#endif /* arch_cmpxchg64_relaxed */ + +#ifndef arch_atomic_read_acquire +static __always_inline int +arch_atomic_read_acquire(const atomic_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define arch_atomic_read_acquire arch_atomic_read_acquire +#endif + +#ifndef arch_atomic_set_release +static __always_inline void +arch_atomic_set_release(atomic_t *v, int i) +{ + smp_store_release(&(v)->counter, i); +} +#define arch_atomic_set_release arch_atomic_set_release +#endif + +#ifndef arch_atomic_add_return_relaxed +#define arch_atomic_add_return_acquire arch_atomic_add_return +#define arch_atomic_add_return_release arch_atomic_add_return +#define arch_atomic_add_return_relaxed arch_atomic_add_return +#else /* arch_atomic_add_return_relaxed */ + +#ifndef arch_atomic_add_return_acquire +static __always_inline int +arch_atomic_add_return_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire +#endif + +#ifndef arch_atomic_add_return_release +static __always_inline int +arch_atomic_add_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_add_return_relaxed(i, v); +} +#define arch_atomic_add_return_release arch_atomic_add_return_release +#endif + +#ifndef arch_atomic_add_return +static __always_inline int +arch_atomic_add_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_add_return arch_atomic_add_return +#endif + +#endif /* arch_atomic_add_return_relaxed */ + +#ifndef arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add +#define arch_atomic_fetch_add_release arch_atomic_fetch_add +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add +#else /* arch_atomic_fetch_add_relaxed */ + +#ifndef arch_atomic_fetch_add_acquire +static __always_inline int +arch_atomic_fetch_add_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire +#endif + +#ifndef arch_atomic_fetch_add_release +static __always_inline int +arch_atomic_fetch_add_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_add_relaxed(i, v); +} +#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release +#endif + +#ifndef arch_atomic_fetch_add +static __always_inline int +arch_atomic_fetch_add(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_add arch_atomic_fetch_add +#endif + +#endif /* arch_atomic_fetch_add_relaxed */ + +#ifndef arch_atomic_sub_return_relaxed +#define arch_atomic_sub_return_acquire arch_atomic_sub_return +#define arch_atomic_sub_return_release arch_atomic_sub_return +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return +#else /* arch_atomic_sub_return_relaxed */ + +#ifndef arch_atomic_sub_return_acquire +static __always_inline int +arch_atomic_sub_return_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire +#endif + +#ifndef arch_atomic_sub_return_release 
+static __always_inline int +arch_atomic_sub_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_sub_return_relaxed(i, v); +} +#define arch_atomic_sub_return_release arch_atomic_sub_return_release +#endif + +#ifndef arch_atomic_sub_return +static __always_inline int +arch_atomic_sub_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_sub_return arch_atomic_sub_return +#endif + +#endif /* arch_atomic_sub_return_relaxed */ + +#ifndef arch_atomic_fetch_sub_relaxed +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub +#else /* arch_atomic_fetch_sub_relaxed */ + +#ifndef arch_atomic_fetch_sub_acquire +static __always_inline int +arch_atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire +#endif + +#ifndef arch_atomic_fetch_sub_release +static __always_inline int +arch_atomic_fetch_sub_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_sub_relaxed(i, v); +} +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release +#endif + +#ifndef arch_atomic_fetch_sub +static __always_inline int +arch_atomic_fetch_sub(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_sub arch_atomic_fetch_sub +#endif + +#endif /* arch_atomic_fetch_sub_relaxed */ + +#ifndef arch_atomic_inc +static __always_inline void +arch_atomic_inc(atomic_t *v) +{ + arch_atomic_add(1, v); +} +#define arch_atomic_inc arch_atomic_inc +#endif + +#ifndef arch_atomic_inc_return_relaxed +#ifdef arch_atomic_inc_return +#define arch_atomic_inc_return_acquire arch_atomic_inc_return +#define arch_atomic_inc_return_release arch_atomic_inc_return +#define arch_atomic_inc_return_relaxed arch_atomic_inc_return +#endif /* arch_atomic_inc_return */ + +#ifndef arch_atomic_inc_return +static __always_inline int +arch_atomic_inc_return(atomic_t *v) +{ + return arch_atomic_add_return(1, v); +} +#define arch_atomic_inc_return arch_atomic_inc_return +#endif + +#ifndef arch_atomic_inc_return_acquire +static __always_inline int +arch_atomic_inc_return_acquire(atomic_t *v) +{ + return arch_atomic_add_return_acquire(1, v); +} +#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire +#endif + +#ifndef arch_atomic_inc_return_release +static __always_inline int +arch_atomic_inc_return_release(atomic_t *v) +{ + return arch_atomic_add_return_release(1, v); +} +#define arch_atomic_inc_return_release arch_atomic_inc_return_release +#endif + +#ifndef arch_atomic_inc_return_relaxed +static __always_inline int +arch_atomic_inc_return_relaxed(atomic_t *v) +{ + return arch_atomic_add_return_relaxed(1, v); +} +#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed +#endif + +#else /* arch_atomic_inc_return_relaxed */ + +#ifndef arch_atomic_inc_return_acquire +static __always_inline int +arch_atomic_inc_return_acquire(atomic_t *v) +{ + int ret = arch_atomic_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire +#endif + +#ifndef 
arch_atomic_inc_return_release +static __always_inline int +arch_atomic_inc_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_inc_return_relaxed(v); +} +#define arch_atomic_inc_return_release arch_atomic_inc_return_release +#endif + +#ifndef arch_atomic_inc_return +static __always_inline int +arch_atomic_inc_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_inc_return arch_atomic_inc_return +#endif + +#endif /* arch_atomic_inc_return_relaxed */ + +#ifndef arch_atomic_fetch_inc_relaxed +#ifdef arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc +#endif /* arch_atomic_fetch_inc */ + +#ifndef arch_atomic_fetch_inc +static __always_inline int +arch_atomic_fetch_inc(atomic_t *v) +{ + return arch_atomic_fetch_add(1, v); +} +#define arch_atomic_fetch_inc arch_atomic_fetch_inc +#endif + +#ifndef arch_atomic_fetch_inc_acquire +static __always_inline int +arch_atomic_fetch_inc_acquire(atomic_t *v) +{ + return arch_atomic_fetch_add_acquire(1, v); +} +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire +#endif + +#ifndef arch_atomic_fetch_inc_release +static __always_inline int +arch_atomic_fetch_inc_release(atomic_t *v) +{ + return arch_atomic_fetch_add_release(1, v); +} +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release +#endif + +#ifndef arch_atomic_fetch_inc_relaxed +static __always_inline int +arch_atomic_fetch_inc_relaxed(atomic_t *v) +{ + return arch_atomic_fetch_add_relaxed(1, v); +} +#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed +#endif + +#else /* arch_atomic_fetch_inc_relaxed */ + +#ifndef arch_atomic_fetch_inc_acquire +static __always_inline int +arch_atomic_fetch_inc_acquire(atomic_t *v) +{ + int ret = arch_atomic_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire +#endif + +#ifndef arch_atomic_fetch_inc_release +static __always_inline int +arch_atomic_fetch_inc_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_inc_relaxed(v); +} +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release +#endif + +#ifndef arch_atomic_fetch_inc +static __always_inline int +arch_atomic_fetch_inc(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_inc arch_atomic_fetch_inc +#endif + +#endif /* arch_atomic_fetch_inc_relaxed */ + +#ifndef arch_atomic_dec +static __always_inline void +arch_atomic_dec(atomic_t *v) +{ + arch_atomic_sub(1, v); +} +#define arch_atomic_dec arch_atomic_dec +#endif + +#ifndef arch_atomic_dec_return_relaxed +#ifdef arch_atomic_dec_return +#define arch_atomic_dec_return_acquire arch_atomic_dec_return +#define arch_atomic_dec_return_release arch_atomic_dec_return +#define arch_atomic_dec_return_relaxed arch_atomic_dec_return +#endif /* arch_atomic_dec_return */ + +#ifndef arch_atomic_dec_return +static __always_inline int +arch_atomic_dec_return(atomic_t *v) +{ + return arch_atomic_sub_return(1, v); +} +#define arch_atomic_dec_return arch_atomic_dec_return +#endif + +#ifndef arch_atomic_dec_return_acquire +static __always_inline int +arch_atomic_dec_return_acquire(atomic_t *v) +{ + return 
arch_atomic_sub_return_acquire(1, v); +} +#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire +#endif + +#ifndef arch_atomic_dec_return_release +static __always_inline int +arch_atomic_dec_return_release(atomic_t *v) +{ + return arch_atomic_sub_return_release(1, v); +} +#define arch_atomic_dec_return_release arch_atomic_dec_return_release +#endif + +#ifndef arch_atomic_dec_return_relaxed +static __always_inline int +arch_atomic_dec_return_relaxed(atomic_t *v) +{ + return arch_atomic_sub_return_relaxed(1, v); +} +#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed +#endif + +#else /* arch_atomic_dec_return_relaxed */ + +#ifndef arch_atomic_dec_return_acquire +static __always_inline int +arch_atomic_dec_return_acquire(atomic_t *v) +{ + int ret = arch_atomic_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire +#endif + +#ifndef arch_atomic_dec_return_release +static __always_inline int +arch_atomic_dec_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_dec_return_relaxed(v); +} +#define arch_atomic_dec_return_release arch_atomic_dec_return_release +#endif + +#ifndef arch_atomic_dec_return +static __always_inline int +arch_atomic_dec_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_dec_return arch_atomic_dec_return +#endif + +#endif /* arch_atomic_dec_return_relaxed */ + +#ifndef arch_atomic_fetch_dec_relaxed +#ifdef arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec +#endif /* arch_atomic_fetch_dec */ + +#ifndef arch_atomic_fetch_dec +static __always_inline int +arch_atomic_fetch_dec(atomic_t *v) +{ + return arch_atomic_fetch_sub(1, v); +} +#define arch_atomic_fetch_dec arch_atomic_fetch_dec +#endif + +#ifndef arch_atomic_fetch_dec_acquire +static __always_inline int +arch_atomic_fetch_dec_acquire(atomic_t *v) +{ + return arch_atomic_fetch_sub_acquire(1, v); +} +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire +#endif + +#ifndef arch_atomic_fetch_dec_release +static __always_inline int +arch_atomic_fetch_dec_release(atomic_t *v) +{ + return arch_atomic_fetch_sub_release(1, v); +} +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release +#endif + +#ifndef arch_atomic_fetch_dec_relaxed +static __always_inline int +arch_atomic_fetch_dec_relaxed(atomic_t *v) +{ + return arch_atomic_fetch_sub_relaxed(1, v); +} +#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed +#endif + +#else /* arch_atomic_fetch_dec_relaxed */ + +#ifndef arch_atomic_fetch_dec_acquire +static __always_inline int +arch_atomic_fetch_dec_acquire(atomic_t *v) +{ + int ret = arch_atomic_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire +#endif + +#ifndef arch_atomic_fetch_dec_release +static __always_inline int +arch_atomic_fetch_dec_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_dec_relaxed(v); +} +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release +#endif + +#ifndef arch_atomic_fetch_dec +static __always_inline int +arch_atomic_fetch_dec(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = 
arch_atomic_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_dec arch_atomic_fetch_dec +#endif + +#endif /* arch_atomic_fetch_dec_relaxed */ + +#ifndef arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and +#define arch_atomic_fetch_and_release arch_atomic_fetch_and +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and +#else /* arch_atomic_fetch_and_relaxed */ + +#ifndef arch_atomic_fetch_and_acquire +static __always_inline int +arch_atomic_fetch_and_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire +#endif + +#ifndef arch_atomic_fetch_and_release +static __always_inline int +arch_atomic_fetch_and_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_and_relaxed(i, v); +} +#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release +#endif + +#ifndef arch_atomic_fetch_and +static __always_inline int +arch_atomic_fetch_and(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_and arch_atomic_fetch_and +#endif + +#endif /* arch_atomic_fetch_and_relaxed */ + +#ifndef arch_atomic_andnot +static __always_inline void +arch_atomic_andnot(int i, atomic_t *v) +{ + arch_atomic_and(~i, v); +} +#define arch_atomic_andnot arch_atomic_andnot +#endif + +#ifndef arch_atomic_fetch_andnot_relaxed +#ifdef arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot +#endif /* arch_atomic_fetch_andnot */ + +#ifndef arch_atomic_fetch_andnot +static __always_inline int +arch_atomic_fetch_andnot(int i, atomic_t *v) +{ + return arch_atomic_fetch_and(~i, v); +} +#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot +#endif + +#ifndef arch_atomic_fetch_andnot_acquire +static __always_inline int +arch_atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_acquire(~i, v); +} +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire +#endif + +#ifndef arch_atomic_fetch_andnot_release +static __always_inline int +arch_atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_release(~i, v); +} +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release +#endif + +#ifndef arch_atomic_fetch_andnot_relaxed +static __always_inline int +arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_relaxed(~i, v); +} +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed +#endif + +#else /* arch_atomic_fetch_andnot_relaxed */ + +#ifndef arch_atomic_fetch_andnot_acquire +static __always_inline int +arch_atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire +#endif + +#ifndef arch_atomic_fetch_andnot_release +static __always_inline int +arch_atomic_fetch_andnot_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_andnot_relaxed(i, v); +} +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release +#endif + +#ifndef 
arch_atomic_fetch_andnot +static __always_inline int +arch_atomic_fetch_andnot(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot +#endif + +#endif /* arch_atomic_fetch_andnot_relaxed */ + +#ifndef arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or +#define arch_atomic_fetch_or_release arch_atomic_fetch_or +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or +#else /* arch_atomic_fetch_or_relaxed */ + +#ifndef arch_atomic_fetch_or_acquire +static __always_inline int +arch_atomic_fetch_or_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire +#endif + +#ifndef arch_atomic_fetch_or_release +static __always_inline int +arch_atomic_fetch_or_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_or_relaxed(i, v); +} +#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release +#endif + +#ifndef arch_atomic_fetch_or +static __always_inline int +arch_atomic_fetch_or(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_or arch_atomic_fetch_or +#endif + +#endif /* arch_atomic_fetch_or_relaxed */ + +#ifndef arch_atomic_fetch_xor_relaxed +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor +#else /* arch_atomic_fetch_xor_relaxed */ + +#ifndef arch_atomic_fetch_xor_acquire +static __always_inline int +arch_atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire +#endif + +#ifndef arch_atomic_fetch_xor_release +static __always_inline int +arch_atomic_fetch_xor_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_xor_relaxed(i, v); +} +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release +#endif + +#ifndef arch_atomic_fetch_xor +static __always_inline int +arch_atomic_fetch_xor(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_xor arch_atomic_fetch_xor +#endif + +#endif /* arch_atomic_fetch_xor_relaxed */ + +#ifndef arch_atomic_xchg_relaxed +#define arch_atomic_xchg_acquire arch_atomic_xchg +#define arch_atomic_xchg_release arch_atomic_xchg +#define arch_atomic_xchg_relaxed arch_atomic_xchg +#else /* arch_atomic_xchg_relaxed */ + +#ifndef arch_atomic_xchg_acquire +static __always_inline int +arch_atomic_xchg_acquire(atomic_t *v, int i) +{ + int ret = arch_atomic_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire +#endif + +#ifndef arch_atomic_xchg_release +static __always_inline int +arch_atomic_xchg_release(atomic_t *v, int i) +{ + __atomic_release_fence(); + return arch_atomic_xchg_relaxed(v, i); +} +#define arch_atomic_xchg_release arch_atomic_xchg_release +#endif + +#ifndef arch_atomic_xchg +static __always_inline int +arch_atomic_xchg(atomic_t *v, int i) +{ + int ret; + 
__atomic_pre_full_fence(); + ret = arch_atomic_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_xchg arch_atomic_xchg +#endif + +#endif /* arch_atomic_xchg_relaxed */ + +#ifndef arch_atomic_cmpxchg_relaxed +#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg +#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg +#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg +#else /* arch_atomic_cmpxchg_relaxed */ + +#ifndef arch_atomic_cmpxchg_acquire +static __always_inline int +arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + int ret = arch_atomic_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire +#endif + +#ifndef arch_atomic_cmpxchg_release +static __always_inline int +arch_atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + __atomic_release_fence(); + return arch_atomic_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release +#endif + +#ifndef arch_atomic_cmpxchg +static __always_inline int +arch_atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_cmpxchg arch_atomic_cmpxchg +#endif + +#endif /* arch_atomic_cmpxchg_relaxed */ + +#ifndef arch_atomic_try_cmpxchg_relaxed +#ifdef arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg +#endif /* arch_atomic_try_cmpxchg */ + +#ifndef arch_atomic_try_cmpxchg +static __always_inline bool +arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg +#endif + +#ifndef arch_atomic_try_cmpxchg_acquire +static __always_inline bool +arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic_try_cmpxchg_release +static __always_inline bool +arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release +#endif + +#ifndef arch_atomic_try_cmpxchg_relaxed +static __always_inline bool +arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed +#endif + +#else /* arch_atomic_try_cmpxchg_relaxed */ + +#ifndef arch_atomic_try_cmpxchg_acquire +static __always_inline bool +arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic_try_cmpxchg_release +static __always_inline bool +arch_atomic_try_cmpxchg_release(atomic_t *v, 
int *old, int new) +{ + __atomic_release_fence(); + return arch_atomic_try_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release +#endif + +#ifndef arch_atomic_try_cmpxchg +static __always_inline bool +arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg +#endif + +#endif /* arch_atomic_try_cmpxchg_relaxed */ + +#ifndef arch_atomic_sub_and_test +/** + * arch_atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic_sub_and_test(int i, atomic_t *v) +{ + return arch_atomic_sub_return(i, v) == 0; +} +#define arch_atomic_sub_and_test arch_atomic_sub_and_test +#endif + +#ifndef arch_atomic_dec_and_test +/** + * arch_atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static __always_inline bool +arch_atomic_dec_and_test(atomic_t *v) +{ + return arch_atomic_dec_return(v) == 0; +} +#define arch_atomic_dec_and_test arch_atomic_dec_and_test +#endif + +#ifndef arch_atomic_inc_and_test +/** + * arch_atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic_inc_and_test(atomic_t *v) +{ + return arch_atomic_inc_return(v) == 0; +} +#define arch_atomic_inc_and_test arch_atomic_inc_and_test +#endif + +#ifndef arch_atomic_add_negative +/** + * arch_atomic_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static __always_inline bool +arch_atomic_add_negative(int i, atomic_t *v) +{ + return arch_atomic_add_return(i, v) < 0; +} +#define arch_atomic_add_negative arch_atomic_add_negative +#endif + +#ifndef arch_atomic_fetch_add_unless +/** + * arch_atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static __always_inline int +arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int c = arch_atomic_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!arch_atomic_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless +#endif + +#ifndef arch_atomic_add_unless +/** + * arch_atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. 
+ */ +static __always_inline bool +arch_atomic_add_unless(atomic_t *v, int a, int u) +{ + return arch_atomic_fetch_add_unless(v, a, u) != u; +} +#define arch_atomic_add_unless arch_atomic_add_unless +#endif + +#ifndef arch_atomic_inc_not_zero +/** + * arch_atomic_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. + */ +static __always_inline bool +arch_atomic_inc_not_zero(atomic_t *v) +{ + return arch_atomic_add_unless(v, 1, 0); +} +#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero +#endif + +#ifndef arch_atomic_inc_unless_negative +static __always_inline bool +arch_atomic_inc_unless_negative(atomic_t *v) +{ + int c = arch_atomic_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!arch_atomic_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative +#endif + +#ifndef arch_atomic_dec_unless_positive +static __always_inline bool +arch_atomic_dec_unless_positive(atomic_t *v) +{ + int c = arch_atomic_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!arch_atomic_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive +#endif + +#ifndef arch_atomic_dec_if_positive +static __always_inline int +arch_atomic_dec_if_positive(atomic_t *v) +{ + int dec, c = arch_atomic_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!arch_atomic_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive +#endif + +#ifdef CONFIG_GENERIC_ATOMIC64 +#include <asm-generic/atomic64.h> +#endif + +#ifndef arch_atomic64_read_acquire +static __always_inline s64 +arch_atomic64_read_acquire(const atomic64_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define arch_atomic64_read_acquire arch_atomic64_read_acquire +#endif + +#ifndef arch_atomic64_set_release +static __always_inline void +arch_atomic64_set_release(atomic64_t *v, s64 i) +{ + smp_store_release(&(v)->counter, i); +} +#define arch_atomic64_set_release arch_atomic64_set_release +#endif + +#ifndef arch_atomic64_add_return_relaxed +#define arch_atomic64_add_return_acquire arch_atomic64_add_return +#define arch_atomic64_add_return_release arch_atomic64_add_return +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return +#else /* arch_atomic64_add_return_relaxed */ + +#ifndef arch_atomic64_add_return_acquire +static __always_inline s64 +arch_atomic64_add_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire +#endif + +#ifndef arch_atomic64_add_return_release +static __always_inline s64 +arch_atomic64_add_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_add_return_relaxed(i, v); +} +#define arch_atomic64_add_return_release arch_atomic64_add_return_release +#endif + +#ifndef arch_atomic64_add_return +static __always_inline s64 +arch_atomic64_add_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_add_return arch_atomic64_add_return +#endif + +#endif /* arch_atomic64_add_return_relaxed */ + +#ifndef arch_atomic64_fetch_add_relaxed +#define 
arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add +#else /* arch_atomic64_fetch_add_relaxed */ + +#ifndef arch_atomic64_fetch_add_acquire +static __always_inline s64 +arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire +#endif + +#ifndef arch_atomic64_fetch_add_release +static __always_inline s64 +arch_atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_add_relaxed(i, v); +} +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release +#endif + +#ifndef arch_atomic64_fetch_add +static __always_inline s64 +arch_atomic64_fetch_add(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_add arch_atomic64_fetch_add +#endif + +#endif /* arch_atomic64_fetch_add_relaxed */ + +#ifndef arch_atomic64_sub_return_relaxed +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return +#define arch_atomic64_sub_return_release arch_atomic64_sub_return +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return +#else /* arch_atomic64_sub_return_relaxed */ + +#ifndef arch_atomic64_sub_return_acquire +static __always_inline s64 +arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire +#endif + +#ifndef arch_atomic64_sub_return_release +static __always_inline s64 +arch_atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_sub_return_relaxed(i, v); +} +#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release +#endif + +#ifndef arch_atomic64_sub_return +static __always_inline s64 +arch_atomic64_sub_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_sub_return arch_atomic64_sub_return +#endif + +#endif /* arch_atomic64_sub_return_relaxed */ + +#ifndef arch_atomic64_fetch_sub_relaxed +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub +#else /* arch_atomic64_fetch_sub_relaxed */ + +#ifndef arch_atomic64_fetch_sub_acquire +static __always_inline s64 +arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire +#endif + +#ifndef arch_atomic64_fetch_sub_release +static __always_inline s64 +arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_sub_relaxed(i, v); +} +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release +#endif + +#ifndef arch_atomic64_fetch_sub +static __always_inline s64 +arch_atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_sub_relaxed(i, v); + 
__atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub +#endif + +#endif /* arch_atomic64_fetch_sub_relaxed */ + +#ifndef arch_atomic64_inc +static __always_inline void +arch_atomic64_inc(atomic64_t *v) +{ + arch_atomic64_add(1, v); +} +#define arch_atomic64_inc arch_atomic64_inc +#endif + +#ifndef arch_atomic64_inc_return_relaxed +#ifdef arch_atomic64_inc_return +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return +#define arch_atomic64_inc_return_release arch_atomic64_inc_return +#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return +#endif /* arch_atomic64_inc_return */ + +#ifndef arch_atomic64_inc_return +static __always_inline s64 +arch_atomic64_inc_return(atomic64_t *v) +{ + return arch_atomic64_add_return(1, v); +} +#define arch_atomic64_inc_return arch_atomic64_inc_return +#endif + +#ifndef arch_atomic64_inc_return_acquire +static __always_inline s64 +arch_atomic64_inc_return_acquire(atomic64_t *v) +{ + return arch_atomic64_add_return_acquire(1, v); +} +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire +#endif + +#ifndef arch_atomic64_inc_return_release +static __always_inline s64 +arch_atomic64_inc_return_release(atomic64_t *v) +{ + return arch_atomic64_add_return_release(1, v); +} +#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release +#endif + +#ifndef arch_atomic64_inc_return_relaxed +static __always_inline s64 +arch_atomic64_inc_return_relaxed(atomic64_t *v) +{ + return arch_atomic64_add_return_relaxed(1, v); +} +#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed +#endif + +#else /* arch_atomic64_inc_return_relaxed */ + +#ifndef arch_atomic64_inc_return_acquire +static __always_inline s64 +arch_atomic64_inc_return_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire +#endif + +#ifndef arch_atomic64_inc_return_release +static __always_inline s64 +arch_atomic64_inc_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_inc_return_relaxed(v); +} +#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release +#endif + +#ifndef arch_atomic64_inc_return +static __always_inline s64 +arch_atomic64_inc_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_inc_return arch_atomic64_inc_return +#endif + +#endif /* arch_atomic64_inc_return_relaxed */ + +#ifndef arch_atomic64_fetch_inc_relaxed +#ifdef arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc +#endif /* arch_atomic64_fetch_inc */ + +#ifndef arch_atomic64_fetch_inc +static __always_inline s64 +arch_atomic64_fetch_inc(atomic64_t *v) +{ + return arch_atomic64_fetch_add(1, v); +} +#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc +#endif + +#ifndef arch_atomic64_fetch_inc_acquire +static __always_inline s64 +arch_atomic64_fetch_inc_acquire(atomic64_t *v) +{ + return arch_atomic64_fetch_add_acquire(1, v); +} +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire +#endif + +#ifndef arch_atomic64_fetch_inc_release +static __always_inline s64 +arch_atomic64_fetch_inc_release(atomic64_t *v) +{ + return 
arch_atomic64_fetch_add_release(1, v); +} +#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release +#endif + +#ifndef arch_atomic64_fetch_inc_relaxed +static __always_inline s64 +arch_atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + return arch_atomic64_fetch_add_relaxed(1, v); +} +#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed +#endif + +#else /* arch_atomic64_fetch_inc_relaxed */ + +#ifndef arch_atomic64_fetch_inc_acquire +static __always_inline s64 +arch_atomic64_fetch_inc_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire +#endif + +#ifndef arch_atomic64_fetch_inc_release +static __always_inline s64 +arch_atomic64_fetch_inc_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_inc_relaxed(v); +} +#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release +#endif + +#ifndef arch_atomic64_fetch_inc +static __always_inline s64 +arch_atomic64_fetch_inc(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc +#endif + +#endif /* arch_atomic64_fetch_inc_relaxed */ + +#ifndef arch_atomic64_dec +static __always_inline void +arch_atomic64_dec(atomic64_t *v) +{ + arch_atomic64_sub(1, v); +} +#define arch_atomic64_dec arch_atomic64_dec +#endif + +#ifndef arch_atomic64_dec_return_relaxed +#ifdef arch_atomic64_dec_return +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return +#define arch_atomic64_dec_return_release arch_atomic64_dec_return +#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return +#endif /* arch_atomic64_dec_return */ + +#ifndef arch_atomic64_dec_return +static __always_inline s64 +arch_atomic64_dec_return(atomic64_t *v) +{ + return arch_atomic64_sub_return(1, v); +} +#define arch_atomic64_dec_return arch_atomic64_dec_return +#endif + +#ifndef arch_atomic64_dec_return_acquire +static __always_inline s64 +arch_atomic64_dec_return_acquire(atomic64_t *v) +{ + return arch_atomic64_sub_return_acquire(1, v); +} +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire +#endif + +#ifndef arch_atomic64_dec_return_release +static __always_inline s64 +arch_atomic64_dec_return_release(atomic64_t *v) +{ + return arch_atomic64_sub_return_release(1, v); +} +#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release +#endif + +#ifndef arch_atomic64_dec_return_relaxed +static __always_inline s64 +arch_atomic64_dec_return_relaxed(atomic64_t *v) +{ + return arch_atomic64_sub_return_relaxed(1, v); +} +#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed +#endif + +#else /* arch_atomic64_dec_return_relaxed */ + +#ifndef arch_atomic64_dec_return_acquire +static __always_inline s64 +arch_atomic64_dec_return_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire +#endif + +#ifndef arch_atomic64_dec_return_release +static __always_inline s64 +arch_atomic64_dec_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_dec_return_relaxed(v); +} +#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release +#endif + +#ifndef arch_atomic64_dec_return +static __always_inline s64 
+arch_atomic64_dec_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_dec_return arch_atomic64_dec_return +#endif + +#endif /* arch_atomic64_dec_return_relaxed */ + +#ifndef arch_atomic64_fetch_dec_relaxed +#ifdef arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec +#endif /* arch_atomic64_fetch_dec */ + +#ifndef arch_atomic64_fetch_dec +static __always_inline s64 +arch_atomic64_fetch_dec(atomic64_t *v) +{ + return arch_atomic64_fetch_sub(1, v); +} +#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec +#endif + +#ifndef arch_atomic64_fetch_dec_acquire +static __always_inline s64 +arch_atomic64_fetch_dec_acquire(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_acquire(1, v); +} +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire +#endif + +#ifndef arch_atomic64_fetch_dec_release +static __always_inline s64 +arch_atomic64_fetch_dec_release(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_release(1, v); +} +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release +#endif + +#ifndef arch_atomic64_fetch_dec_relaxed +static __always_inline s64 +arch_atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_relaxed(1, v); +} +#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed +#endif + +#else /* arch_atomic64_fetch_dec_relaxed */ + +#ifndef arch_atomic64_fetch_dec_acquire +static __always_inline s64 +arch_atomic64_fetch_dec_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire +#endif + +#ifndef arch_atomic64_fetch_dec_release +static __always_inline s64 +arch_atomic64_fetch_dec_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_dec_relaxed(v); +} +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release +#endif + +#ifndef arch_atomic64_fetch_dec +static __always_inline s64 +arch_atomic64_fetch_dec(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec +#endif + +#endif /* arch_atomic64_fetch_dec_relaxed */ + +#ifndef arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and +#else /* arch_atomic64_fetch_and_relaxed */ + +#ifndef arch_atomic64_fetch_and_acquire +static __always_inline s64 +arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire +#endif + +#ifndef arch_atomic64_fetch_and_release +static __always_inline s64 +arch_atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_and_relaxed(i, v); +} +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release +#endif + +#ifndef arch_atomic64_fetch_and +static __always_inline s64 +arch_atomic64_fetch_and(s64 i, atomic64_t *v) +{ + s64 ret; + 
__atomic_pre_full_fence(); + ret = arch_atomic64_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#endif + +#endif /* arch_atomic64_fetch_and_relaxed */ + +#ifndef arch_atomic64_andnot +static __always_inline void +arch_atomic64_andnot(s64 i, atomic64_t *v) +{ + arch_atomic64_and(~i, v); +} +#define arch_atomic64_andnot arch_atomic64_andnot +#endif + +#ifndef arch_atomic64_fetch_andnot_relaxed +#ifdef arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot +#endif /* arch_atomic64_fetch_andnot */ + +#ifndef arch_atomic64_fetch_andnot +static __always_inline s64 +arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and(~i, v); +} +#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot +#endif + +#ifndef arch_atomic64_fetch_andnot_acquire +static __always_inline s64 +arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_acquire(~i, v); +} +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire +#endif + +#ifndef arch_atomic64_fetch_andnot_release +static __always_inline s64 +arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_release(~i, v); +} +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release +#endif + +#ifndef arch_atomic64_fetch_andnot_relaxed +static __always_inline s64 +arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_relaxed(~i, v); +} +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed +#endif + +#else /* arch_atomic64_fetch_andnot_relaxed */ + +#ifndef arch_atomic64_fetch_andnot_acquire +static __always_inline s64 +arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire +#endif + +#ifndef arch_atomic64_fetch_andnot_release +static __always_inline s64 +arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_andnot_relaxed(i, v); +} +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release +#endif + +#ifndef arch_atomic64_fetch_andnot +static __always_inline s64 +arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot +#endif + +#endif /* arch_atomic64_fetch_andnot_relaxed */ + +#ifndef arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or +#else /* arch_atomic64_fetch_or_relaxed */ + +#ifndef arch_atomic64_fetch_or_acquire +static __always_inline s64 +arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire +#endif + +#ifndef arch_atomic64_fetch_or_release +static __always_inline s64 
+arch_atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_or_relaxed(i, v); +} +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release +#endif + +#ifndef arch_atomic64_fetch_or +static __always_inline s64 +arch_atomic64_fetch_or(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#endif + +#endif /* arch_atomic64_fetch_or_relaxed */ + +#ifndef arch_atomic64_fetch_xor_relaxed +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor +#else /* arch_atomic64_fetch_xor_relaxed */ + +#ifndef arch_atomic64_fetch_xor_acquire +static __always_inline s64 +arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire +#endif + +#ifndef arch_atomic64_fetch_xor_release +static __always_inline s64 +arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_xor_relaxed(i, v); +} +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release +#endif + +#ifndef arch_atomic64_fetch_xor +static __always_inline s64 +arch_atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor +#endif + +#endif /* arch_atomic64_fetch_xor_relaxed */ + +#ifndef arch_atomic64_xchg_relaxed +#define arch_atomic64_xchg_acquire arch_atomic64_xchg +#define arch_atomic64_xchg_release arch_atomic64_xchg +#define arch_atomic64_xchg_relaxed arch_atomic64_xchg +#else /* arch_atomic64_xchg_relaxed */ + +#ifndef arch_atomic64_xchg_acquire +static __always_inline s64 +arch_atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + s64 ret = arch_atomic64_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire +#endif + +#ifndef arch_atomic64_xchg_release +static __always_inline s64 +arch_atomic64_xchg_release(atomic64_t *v, s64 i) +{ + __atomic_release_fence(); + return arch_atomic64_xchg_relaxed(v, i); +} +#define arch_atomic64_xchg_release arch_atomic64_xchg_release +#endif + +#ifndef arch_atomic64_xchg +static __always_inline s64 +arch_atomic64_xchg(atomic64_t *v, s64 i) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_xchg arch_atomic64_xchg +#endif + +#endif /* arch_atomic64_xchg_relaxed */ + +#ifndef arch_atomic64_cmpxchg_relaxed +#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg +#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg +#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg +#else /* arch_atomic64_cmpxchg_relaxed */ + +#ifndef arch_atomic64_cmpxchg_acquire +static __always_inline s64 +arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_cmpxchg_release +static 
__always_inline s64 +arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + __atomic_release_fence(); + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release +#endif + +#ifndef arch_atomic64_cmpxchg +static __always_inline s64 +arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg +#endif + +#endif /* arch_atomic64_cmpxchg_relaxed */ + +#ifndef arch_atomic64_try_cmpxchg_relaxed +#ifdef arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg +#endif /* arch_atomic64_try_cmpxchg */ + +#ifndef arch_atomic64_try_cmpxchg +static __always_inline bool +arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg +#endif + +#ifndef arch_atomic64_try_cmpxchg_acquire +static __always_inline bool +arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_try_cmpxchg_release +static __always_inline bool +arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release +#endif + +#ifndef arch_atomic64_try_cmpxchg_relaxed +static __always_inline bool +arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed +#endif + +#else /* arch_atomic64_try_cmpxchg_relaxed */ + +#ifndef arch_atomic64_try_cmpxchg_acquire +static __always_inline bool +arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_try_cmpxchg_release +static __always_inline bool +arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + __atomic_release_fence(); + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release +#endif + +#ifndef arch_atomic64_try_cmpxchg +static __always_inline bool +arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg +#endif + +#endif /* arch_atomic64_try_cmpxchg_relaxed */ + +#ifndef arch_atomic64_sub_and_test +/** + * arch_atomic64_sub_and_test - subtract value from variable and 
test result + * @i: integer value to subtract + * @v: pointer of type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + return arch_atomic64_sub_return(i, v) == 0; +} +#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test +#endif + +#ifndef arch_atomic64_dec_and_test +/** + * arch_atomic64_dec_and_test - decrement and test + * @v: pointer of type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static __always_inline bool +arch_atomic64_dec_and_test(atomic64_t *v) +{ + return arch_atomic64_dec_return(v) == 0; +} +#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test +#endif + +#ifndef arch_atomic64_inc_and_test +/** + * arch_atomic64_inc_and_test - increment and test + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic64_inc_and_test(atomic64_t *v) +{ + return arch_atomic64_inc_return(v) == 0; +} +#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test +#endif + +#ifndef arch_atomic64_add_negative +/** + * arch_atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static __always_inline bool +arch_atomic64_add_negative(s64 i, atomic64_t *v) +{ + return arch_atomic64_add_return(i, v) < 0; +} +#define arch_atomic64_add_negative arch_atomic64_add_negative +#endif + +#ifndef arch_atomic64_fetch_add_unless +/** + * arch_atomic64_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static __always_inline s64 +arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!arch_atomic64_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless +#endif + +#ifndef arch_atomic64_add_unless +/** + * arch_atomic64_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. + */ +static __always_inline bool +arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + return arch_atomic64_fetch_add_unless(v, a, u) != u; +} +#define arch_atomic64_add_unless arch_atomic64_add_unless +#endif + +#ifndef arch_atomic64_inc_not_zero +/** + * arch_atomic64_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. 
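The arch_atomic64_fetch_add_unless() fallback above is the canonical try_cmpxchg() loop: the value is read once up front, and on failure arch_atomic64_try_cmpxchg() writes the value it actually observed back into c, so the loop never needs to re-read v itself. A minimal sketch of the same idiom, assuming a kernel context with <linux/atomic.h> available (the helper name and its use are hypothetical):

static bool obj_get_unless_zero(atomic64_t *refs)
{
	s64 old = atomic64_read(refs);

	do {
		if (old == 0)
			return false;	/* already released: do not resurrect */
	} while (!atomic64_try_cmpxchg(refs, &old, old + 1));

	return true;	/* reference taken */
}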
+ */ +static __always_inline bool +arch_atomic64_inc_not_zero(atomic64_t *v) +{ + return arch_atomic64_add_unless(v, 1, 0); +} +#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero +#endif + +#ifndef arch_atomic64_inc_unless_negative +static __always_inline bool +arch_atomic64_inc_unless_negative(atomic64_t *v) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative +#endif + +#ifndef arch_atomic64_dec_unless_positive +static __always_inline bool +arch_atomic64_dec_unless_positive(atomic64_t *v) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive +#endif + +#ifndef arch_atomic64_dec_if_positive +static __always_inline s64 +arch_atomic64_dec_if_positive(atomic64_t *v) +{ + s64 dec, c = arch_atomic64_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!arch_atomic64_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive +#endif + +#endif /* _LINUX_ATOMIC_FALLBACK_H */ +// 90cd26cfd69d2250303d654955a0cc12620fb91b diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h index a7d240e465c0..2c4927bf7b8d 100644 --- a/include/linux/atomic-fallback.h +++ b/include/linux/atomic-fallback.h @@ -6,6 +6,8 @@ #ifndef _LINUX_ATOMIC_FALLBACK_H #define _LINUX_ATOMIC_FALLBACK_H +#include <linux/compiler.h> + #ifndef xchg_relaxed #define xchg_relaxed xchg #define xchg_acquire xchg @@ -76,7 +78,7 @@ #endif /* cmpxchg64_relaxed */ #ifndef atomic_read_acquire -static inline int +static __always_inline int atomic_read_acquire(const atomic_t *v) { return smp_load_acquire(&(v)->counter); @@ -85,7 +87,7 @@ atomic_read_acquire(const atomic_t *v) #endif #ifndef atomic_set_release -static inline void +static __always_inline void atomic_set_release(atomic_t *v, int i) { smp_store_release(&(v)->counter, i); @@ -100,7 +102,7 @@ atomic_set_release(atomic_t *v, int i) #else /* atomic_add_return_relaxed */ #ifndef atomic_add_return_acquire -static inline int +static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { int ret = atomic_add_return_relaxed(i, v); @@ -111,7 +113,7 @@ atomic_add_return_acquire(int i, atomic_t *v) #endif #ifndef atomic_add_return_release -static inline int +static __always_inline int atomic_add_return_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -121,7 +123,7 @@ atomic_add_return_release(int i, atomic_t *v) #endif #ifndef atomic_add_return -static inline int +static __always_inline int atomic_add_return(int i, atomic_t *v) { int ret; @@ -142,7 +144,7 @@ atomic_add_return(int i, atomic_t *v) #else /* atomic_fetch_add_relaxed */ #ifndef atomic_fetch_add_acquire -static inline int +static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { int ret = atomic_fetch_add_relaxed(i, v); @@ -153,7 +155,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_add_release -static inline int +static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -163,7 +165,7 @@ atomic_fetch_add_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_add -static inline int +static __always_inline int atomic_fetch_add(int i, atomic_t *v) { int 
ret; @@ -184,7 +186,7 @@ atomic_fetch_add(int i, atomic_t *v) #else /* atomic_sub_return_relaxed */ #ifndef atomic_sub_return_acquire -static inline int +static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { int ret = atomic_sub_return_relaxed(i, v); @@ -195,7 +197,7 @@ atomic_sub_return_acquire(int i, atomic_t *v) #endif #ifndef atomic_sub_return_release -static inline int +static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -205,7 +207,7 @@ atomic_sub_return_release(int i, atomic_t *v) #endif #ifndef atomic_sub_return -static inline int +static __always_inline int atomic_sub_return(int i, atomic_t *v) { int ret; @@ -226,7 +228,7 @@ atomic_sub_return(int i, atomic_t *v) #else /* atomic_fetch_sub_relaxed */ #ifndef atomic_fetch_sub_acquire -static inline int +static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { int ret = atomic_fetch_sub_relaxed(i, v); @@ -237,7 +239,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_sub_release -static inline int +static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -247,7 +249,7 @@ atomic_fetch_sub_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_sub -static inline int +static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { int ret; @@ -262,7 +264,7 @@ atomic_fetch_sub(int i, atomic_t *v) #endif /* atomic_fetch_sub_relaxed */ #ifndef atomic_inc -static inline void +static __always_inline void atomic_inc(atomic_t *v) { atomic_add(1, v); @@ -278,7 +280,7 @@ atomic_inc(atomic_t *v) #endif /* atomic_inc_return */ #ifndef atomic_inc_return -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { return atomic_add_return(1, v); @@ -287,7 +289,7 @@ atomic_inc_return(atomic_t *v) #endif #ifndef atomic_inc_return_acquire -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { return atomic_add_return_acquire(1, v); @@ -296,7 +298,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #ifndef atomic_inc_return_release -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { return atomic_add_return_release(1, v); @@ -305,7 +307,7 @@ atomic_inc_return_release(atomic_t *v) #endif #ifndef atomic_inc_return_relaxed -static inline int +static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { return atomic_add_return_relaxed(1, v); @@ -316,7 +318,7 @@ atomic_inc_return_relaxed(atomic_t *v) #else /* atomic_inc_return_relaxed */ #ifndef atomic_inc_return_acquire -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { int ret = atomic_inc_return_relaxed(v); @@ -327,7 +329,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #ifndef atomic_inc_return_release -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { __atomic_release_fence(); @@ -337,7 +339,7 @@ atomic_inc_return_release(atomic_t *v) #endif #ifndef atomic_inc_return -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { int ret; @@ -359,7 +361,7 @@ atomic_inc_return(atomic_t *v) #endif /* atomic_fetch_inc */ #ifndef atomic_fetch_inc -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { return atomic_fetch_add(1, v); @@ -368,7 +370,7 @@ atomic_fetch_inc(atomic_t *v) #endif #ifndef atomic_fetch_inc_acquire -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { return atomic_fetch_add_acquire(1, 
v); @@ -377,7 +379,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #ifndef atomic_fetch_inc_release -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { return atomic_fetch_add_release(1, v); @@ -386,7 +388,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #ifndef atomic_fetch_inc_relaxed -static inline int +static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { return atomic_fetch_add_relaxed(1, v); @@ -397,7 +399,7 @@ atomic_fetch_inc_relaxed(atomic_t *v) #else /* atomic_fetch_inc_relaxed */ #ifndef atomic_fetch_inc_acquire -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { int ret = atomic_fetch_inc_relaxed(v); @@ -408,7 +410,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #ifndef atomic_fetch_inc_release -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { __atomic_release_fence(); @@ -418,7 +420,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #ifndef atomic_fetch_inc -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { int ret; @@ -433,7 +435,7 @@ atomic_fetch_inc(atomic_t *v) #endif /* atomic_fetch_inc_relaxed */ #ifndef atomic_dec -static inline void +static __always_inline void atomic_dec(atomic_t *v) { atomic_sub(1, v); @@ -449,7 +451,7 @@ atomic_dec(atomic_t *v) #endif /* atomic_dec_return */ #ifndef atomic_dec_return -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { return atomic_sub_return(1, v); @@ -458,7 +460,7 @@ atomic_dec_return(atomic_t *v) #endif #ifndef atomic_dec_return_acquire -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { return atomic_sub_return_acquire(1, v); @@ -467,7 +469,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #ifndef atomic_dec_return_release -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { return atomic_sub_return_release(1, v); @@ -476,7 +478,7 @@ atomic_dec_return_release(atomic_t *v) #endif #ifndef atomic_dec_return_relaxed -static inline int +static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { return atomic_sub_return_relaxed(1, v); @@ -487,7 +489,7 @@ atomic_dec_return_relaxed(atomic_t *v) #else /* atomic_dec_return_relaxed */ #ifndef atomic_dec_return_acquire -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { int ret = atomic_dec_return_relaxed(v); @@ -498,7 +500,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #ifndef atomic_dec_return_release -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { __atomic_release_fence(); @@ -508,7 +510,7 @@ atomic_dec_return_release(atomic_t *v) #endif #ifndef atomic_dec_return -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { int ret; @@ -530,7 +532,7 @@ atomic_dec_return(atomic_t *v) #endif /* atomic_fetch_dec */ #ifndef atomic_fetch_dec -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { return atomic_fetch_sub(1, v); @@ -539,7 +541,7 @@ atomic_fetch_dec(atomic_t *v) #endif #ifndef atomic_fetch_dec_acquire -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { return atomic_fetch_sub_acquire(1, v); @@ -548,7 +550,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #ifndef atomic_fetch_dec_release -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { return atomic_fetch_sub_release(1, v); @@ -557,7 +559,7 @@ atomic_fetch_dec_release(atomic_t *v) 
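A naming convention these generated fallbacks depend on throughout: atomic_fetch_xxx() returns the value the variable held before the operation, while atomic_xxx_return() returns the value after it, and the inc/dec forms are simply add/sub with a constant 1, as the definitions above show. A small illustration (values hypothetical, kernel context assumed):

	atomic_t v = ATOMIC_INIT(5);
	int old, new;

	old = atomic_fetch_inc(&v);	/* old == 5, v now holds 6 */
	new = atomic_inc_return(&v);	/* new == 7, v now holds 7 */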
#endif #ifndef atomic_fetch_dec_relaxed -static inline int +static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { return atomic_fetch_sub_relaxed(1, v); @@ -568,7 +570,7 @@ atomic_fetch_dec_relaxed(atomic_t *v) #else /* atomic_fetch_dec_relaxed */ #ifndef atomic_fetch_dec_acquire -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { int ret = atomic_fetch_dec_relaxed(v); @@ -579,7 +581,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #ifndef atomic_fetch_dec_release -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { __atomic_release_fence(); @@ -589,7 +591,7 @@ atomic_fetch_dec_release(atomic_t *v) #endif #ifndef atomic_fetch_dec -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { int ret; @@ -610,7 +612,7 @@ atomic_fetch_dec(atomic_t *v) #else /* atomic_fetch_and_relaxed */ #ifndef atomic_fetch_and_acquire -static inline int +static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { int ret = atomic_fetch_and_relaxed(i, v); @@ -621,7 +623,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_and_release -static inline int +static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -631,7 +633,7 @@ atomic_fetch_and_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_and -static inline int +static __always_inline int atomic_fetch_and(int i, atomic_t *v) { int ret; @@ -646,7 +648,7 @@ atomic_fetch_and(int i, atomic_t *v) #endif /* atomic_fetch_and_relaxed */ #ifndef atomic_andnot -static inline void +static __always_inline void atomic_andnot(int i, atomic_t *v) { atomic_and(~i, v); @@ -662,7 +664,7 @@ atomic_andnot(int i, atomic_t *v) #endif /* atomic_fetch_andnot */ #ifndef atomic_fetch_andnot -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { return atomic_fetch_and(~i, v); @@ -671,7 +673,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_acquire -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { return atomic_fetch_and_acquire(~i, v); @@ -680,7 +682,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_release -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { return atomic_fetch_and_release(~i, v); @@ -689,7 +691,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_relaxed -static inline int +static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { return atomic_fetch_and_relaxed(~i, v); @@ -700,7 +702,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v) #else /* atomic_fetch_andnot_relaxed */ #ifndef atomic_fetch_andnot_acquire -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { int ret = atomic_fetch_andnot_relaxed(i, v); @@ -711,7 +713,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_release -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -721,7 +723,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { int ret; @@ -742,7 +744,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #else /* atomic_fetch_or_relaxed */ #ifndef atomic_fetch_or_acquire -static inline int +static 
__always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { int ret = atomic_fetch_or_relaxed(i, v); @@ -753,7 +755,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_or_release -static inline int +static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -763,7 +765,7 @@ atomic_fetch_or_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_or -static inline int +static __always_inline int atomic_fetch_or(int i, atomic_t *v) { int ret; @@ -784,7 +786,7 @@ atomic_fetch_or(int i, atomic_t *v) #else /* atomic_fetch_xor_relaxed */ #ifndef atomic_fetch_xor_acquire -static inline int +static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { int ret = atomic_fetch_xor_relaxed(i, v); @@ -795,7 +797,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_xor_release -static inline int +static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -805,7 +807,7 @@ atomic_fetch_xor_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_xor -static inline int +static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { int ret; @@ -826,7 +828,7 @@ atomic_fetch_xor(int i, atomic_t *v) #else /* atomic_xchg_relaxed */ #ifndef atomic_xchg_acquire -static inline int +static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { int ret = atomic_xchg_relaxed(v, i); @@ -837,7 +839,7 @@ atomic_xchg_acquire(atomic_t *v, int i) #endif #ifndef atomic_xchg_release -static inline int +static __always_inline int atomic_xchg_release(atomic_t *v, int i) { __atomic_release_fence(); @@ -847,7 +849,7 @@ atomic_xchg_release(atomic_t *v, int i) #endif #ifndef atomic_xchg -static inline int +static __always_inline int atomic_xchg(atomic_t *v, int i) { int ret; @@ -868,7 +870,7 @@ atomic_xchg(atomic_t *v, int i) #else /* atomic_cmpxchg_relaxed */ #ifndef atomic_cmpxchg_acquire -static inline int +static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { int ret = atomic_cmpxchg_relaxed(v, old, new); @@ -879,7 +881,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new) #endif #ifndef atomic_cmpxchg_release -static inline int +static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { __atomic_release_fence(); @@ -889,7 +891,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new) #endif #ifndef atomic_cmpxchg -static inline int +static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; @@ -911,7 +913,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new) #endif /* atomic_try_cmpxchg */ #ifndef atomic_try_cmpxchg -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { int r, o = *old; @@ -924,7 +926,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { int r, o = *old; @@ -937,7 +939,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_release -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { int r, o = *old; @@ -950,7 +952,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_relaxed -static inline bool +static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { int r, o = *old; @@ -965,7 +967,7 @@ 
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) #else /* atomic_try_cmpxchg_relaxed */ #ifndef atomic_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { bool ret = atomic_try_cmpxchg_relaxed(v, old, new); @@ -976,7 +978,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_release -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { __atomic_release_fence(); @@ -986,7 +988,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { bool ret; @@ -1010,7 +1012,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) * true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { return atomic_sub_return(i, v) == 0; @@ -1027,7 +1029,7 @@ atomic_sub_and_test(int i, atomic_t *v) * returns true if the result is 0, or false for all other * cases. */ -static inline bool +static __always_inline bool atomic_dec_and_test(atomic_t *v) { return atomic_dec_return(v) == 0; @@ -1044,7 +1046,7 @@ atomic_dec_and_test(atomic_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic_inc_and_test(atomic_t *v) { return atomic_inc_return(v) == 0; @@ -1062,7 +1064,7 @@ atomic_inc_and_test(atomic_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline bool +static __always_inline bool atomic_add_negative(int i, atomic_t *v) { return atomic_add_return(i, v) < 0; @@ -1080,7 +1082,7 @@ atomic_add_negative(int i, atomic_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns original value of @v */ -static inline int +static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int c = atomic_read(v); @@ -1105,7 +1107,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) * Atomically adds @a to @v, if @v was not already @u. * Returns true if the addition was done. */ -static inline bool +static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { return atomic_fetch_add_unless(v, a, u) != u; @@ -1121,7 +1123,7 @@ atomic_add_unless(atomic_t *v, int a, int u) * Atomically increments @v by 1, if @v is non-zero. * Returns true if the increment was done. 
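The two cmpxchg() flavours defined above differ in how failure is reported: atomic_cmpxchg() returns the value it found, so the caller compares it against the expected value, whereas atomic_try_cmpxchg() returns a success flag and writes the observed value back through *old, which is what keeps CAS loops compact. A sketch of both styles (kernel context assumed, values hypothetical):

	atomic_t v = ATOMIC_INIT(0);
	int expected = 0;

	if (atomic_cmpxchg(&v, 0, 1) == 0) {
		/* won the race: v was 0, now 1 */
	}

	if (atomic_try_cmpxchg(&v, &expected, 1)) {
		/* won the race; on failure, expected now holds v's value */
	}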
*/ -static inline bool +static __always_inline bool atomic_inc_not_zero(atomic_t *v) { return atomic_add_unless(v, 1, 0); @@ -1130,7 +1132,7 @@ atomic_inc_not_zero(atomic_t *v) #endif #ifndef atomic_inc_unless_negative -static inline bool +static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { int c = atomic_read(v); @@ -1146,7 +1148,7 @@ atomic_inc_unless_negative(atomic_t *v) #endif #ifndef atomic_dec_unless_positive -static inline bool +static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { int c = atomic_read(v); @@ -1162,7 +1164,7 @@ atomic_dec_unless_positive(atomic_t *v) #endif #ifndef atomic_dec_if_positive -static inline int +static __always_inline int atomic_dec_if_positive(atomic_t *v) { int dec, c = atomic_read(v); @@ -1178,15 +1180,12 @@ atomic_dec_if_positive(atomic_t *v) #define atomic_dec_if_positive atomic_dec_if_positive #endif -#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) -#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) - #ifdef CONFIG_GENERIC_ATOMIC64 #include <asm-generic/atomic64.h> #endif #ifndef atomic64_read_acquire -static inline s64 +static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { return smp_load_acquire(&(v)->counter); @@ -1195,7 +1194,7 @@ atomic64_read_acquire(const atomic64_t *v) #endif #ifndef atomic64_set_release -static inline void +static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { smp_store_release(&(v)->counter, i); @@ -1210,7 +1209,7 @@ atomic64_set_release(atomic64_t *v, s64 i) #else /* atomic64_add_return_relaxed */ #ifndef atomic64_add_return_acquire -static inline s64 +static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_add_return_relaxed(i, v); @@ -1221,7 +1220,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_add_return_release -static inline s64 +static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1231,7 +1230,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_add_return -static inline s64 +static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { s64 ret; @@ -1252,7 +1251,7 @@ atomic64_add_return(s64 i, atomic64_t *v) #else /* atomic64_fetch_add_relaxed */ #ifndef atomic64_fetch_add_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_add_relaxed(i, v); @@ -1263,7 +1262,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_add_release -static inline s64 +static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1273,7 +1272,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_add -static inline s64 +static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { s64 ret; @@ -1294,7 +1293,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v) #else /* atomic64_sub_return_relaxed */ #ifndef atomic64_sub_return_acquire -static inline s64 +static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_sub_return_relaxed(i, v); @@ -1305,7 +1304,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_sub_return_release -static inline s64 +static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1315,7 +1314,7 @@ atomic64_sub_return_release(s64 i, 
atomic64_t *v) #endif #ifndef atomic64_sub_return -static inline s64 +static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { s64 ret; @@ -1336,7 +1335,7 @@ atomic64_sub_return(s64 i, atomic64_t *v) #else /* atomic64_fetch_sub_relaxed */ #ifndef atomic64_fetch_sub_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_sub_relaxed(i, v); @@ -1347,7 +1346,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_sub_release -static inline s64 +static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1357,7 +1356,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_sub -static inline s64 +static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { s64 ret; @@ -1372,7 +1371,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) #endif /* atomic64_fetch_sub_relaxed */ #ifndef atomic64_inc -static inline void +static __always_inline void atomic64_inc(atomic64_t *v) { atomic64_add(1, v); @@ -1388,7 +1387,7 @@ atomic64_inc(atomic64_t *v) #endif /* atomic64_inc_return */ #ifndef atomic64_inc_return -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { return atomic64_add_return(1, v); @@ -1397,7 +1396,7 @@ atomic64_inc_return(atomic64_t *v) #endif #ifndef atomic64_inc_return_acquire -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { return atomic64_add_return_acquire(1, v); @@ -1406,7 +1405,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #ifndef atomic64_inc_return_release -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { return atomic64_add_return_release(1, v); @@ -1415,7 +1414,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #ifndef atomic64_inc_return_relaxed -static inline s64 +static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { return atomic64_add_return_relaxed(1, v); @@ -1426,7 +1425,7 @@ atomic64_inc_return_relaxed(atomic64_t *v) #else /* atomic64_inc_return_relaxed */ #ifndef atomic64_inc_return_acquire -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { s64 ret = atomic64_inc_return_relaxed(v); @@ -1437,7 +1436,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #ifndef atomic64_inc_return_release -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { __atomic_release_fence(); @@ -1447,7 +1446,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #ifndef atomic64_inc_return -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { s64 ret; @@ -1469,7 +1468,7 @@ atomic64_inc_return(atomic64_t *v) #endif /* atomic64_fetch_inc */ #ifndef atomic64_fetch_inc -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { return atomic64_fetch_add(1, v); @@ -1478,7 +1477,7 @@ atomic64_fetch_inc(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { return atomic64_fetch_add_acquire(1, v); @@ -1487,7 +1486,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_release -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { return atomic64_fetch_add_release(1, v); @@ -1496,7 +1495,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_relaxed -static 
inline s64 +static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { return atomic64_fetch_add_relaxed(1, v); @@ -1507,7 +1506,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v) #else /* atomic64_fetch_inc_relaxed */ #ifndef atomic64_fetch_inc_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { s64 ret = atomic64_fetch_inc_relaxed(v); @@ -1518,7 +1517,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_release -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { __atomic_release_fence(); @@ -1528,7 +1527,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #ifndef atomic64_fetch_inc -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { s64 ret; @@ -1543,7 +1542,7 @@ atomic64_fetch_inc(atomic64_t *v) #endif /* atomic64_fetch_inc_relaxed */ #ifndef atomic64_dec -static inline void +static __always_inline void atomic64_dec(atomic64_t *v) { atomic64_sub(1, v); @@ -1559,7 +1558,7 @@ atomic64_dec(atomic64_t *v) #endif /* atomic64_dec_return */ #ifndef atomic64_dec_return -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { return atomic64_sub_return(1, v); @@ -1568,7 +1567,7 @@ atomic64_dec_return(atomic64_t *v) #endif #ifndef atomic64_dec_return_acquire -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { return atomic64_sub_return_acquire(1, v); @@ -1577,7 +1576,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #ifndef atomic64_dec_return_release -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { return atomic64_sub_return_release(1, v); @@ -1586,7 +1585,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #ifndef atomic64_dec_return_relaxed -static inline s64 +static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { return atomic64_sub_return_relaxed(1, v); @@ -1597,7 +1596,7 @@ atomic64_dec_return_relaxed(atomic64_t *v) #else /* atomic64_dec_return_relaxed */ #ifndef atomic64_dec_return_acquire -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { s64 ret = atomic64_dec_return_relaxed(v); @@ -1608,7 +1607,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #ifndef atomic64_dec_return_release -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { __atomic_release_fence(); @@ -1618,7 +1617,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #ifndef atomic64_dec_return -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { s64 ret; @@ -1640,7 +1639,7 @@ atomic64_dec_return(atomic64_t *v) #endif /* atomic64_fetch_dec */ #ifndef atomic64_fetch_dec -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { return atomic64_fetch_sub(1, v); @@ -1649,7 +1648,7 @@ atomic64_fetch_dec(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { return atomic64_fetch_sub_acquire(1, v); @@ -1658,7 +1657,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_release -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { return atomic64_fetch_sub_release(1, v); @@ -1667,7 +1666,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t 
*v) { return atomic64_fetch_sub_relaxed(1, v); @@ -1678,7 +1677,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v) #else /* atomic64_fetch_dec_relaxed */ #ifndef atomic64_fetch_dec_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { s64 ret = atomic64_fetch_dec_relaxed(v); @@ -1689,7 +1688,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_release -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { __atomic_release_fence(); @@ -1699,7 +1698,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #ifndef atomic64_fetch_dec -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { s64 ret; @@ -1720,7 +1719,7 @@ atomic64_fetch_dec(atomic64_t *v) #else /* atomic64_fetch_and_relaxed */ #ifndef atomic64_fetch_and_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_and_relaxed(i, v); @@ -1731,7 +1730,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_and_release -static inline s64 +static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1741,7 +1740,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_and -static inline s64 +static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { s64 ret; @@ -1756,7 +1755,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v) #endif /* atomic64_fetch_and_relaxed */ #ifndef atomic64_andnot -static inline void +static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { atomic64_and(~i, v); @@ -1772,7 +1771,7 @@ atomic64_andnot(s64 i, atomic64_t *v) #endif /* atomic64_fetch_andnot */ #ifndef atomic64_fetch_andnot -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { return atomic64_fetch_and(~i, v); @@ -1781,7 +1780,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { return atomic64_fetch_and_acquire(~i, v); @@ -1790,7 +1789,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_release -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { return atomic64_fetch_and_release(~i, v); @@ -1799,7 +1798,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { return atomic64_fetch_and_relaxed(~i, v); @@ -1810,7 +1809,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) #else /* atomic64_fetch_andnot_relaxed */ #ifndef atomic64_fetch_andnot_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_andnot_relaxed(i, v); @@ -1821,7 +1820,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_release -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1831,7 +1830,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { s64 ret; @@ -1852,7 +1851,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #else /* 
atomic64_fetch_or_relaxed */ #ifndef atomic64_fetch_or_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_or_relaxed(i, v); @@ -1863,7 +1862,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_or_release -static inline s64 +static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1873,7 +1872,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_or -static inline s64 +static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { s64 ret; @@ -1894,7 +1893,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v) #else /* atomic64_fetch_xor_relaxed */ #ifndef atomic64_fetch_xor_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_xor_relaxed(i, v); @@ -1905,7 +1904,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_xor_release -static inline s64 +static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1915,7 +1914,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_xor -static inline s64 +static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { s64 ret; @@ -1936,7 +1935,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) #else /* atomic64_xchg_relaxed */ #ifndef atomic64_xchg_acquire -static inline s64 +static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { s64 ret = atomic64_xchg_relaxed(v, i); @@ -1947,7 +1946,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i) #endif #ifndef atomic64_xchg_release -static inline s64 +static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { __atomic_release_fence(); @@ -1957,7 +1956,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i) #endif #ifndef atomic64_xchg -static inline s64 +static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { s64 ret; @@ -1978,7 +1977,7 @@ atomic64_xchg(atomic64_t *v, s64 i) #else /* atomic64_cmpxchg_relaxed */ #ifndef atomic64_cmpxchg_acquire -static inline s64 +static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { s64 ret = atomic64_cmpxchg_relaxed(v, old, new); @@ -1989,7 +1988,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) #endif #ifndef atomic64_cmpxchg_release -static inline s64 +static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { __atomic_release_fence(); @@ -1999,7 +1998,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) #endif #ifndef atomic64_cmpxchg -static inline s64 +static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { s64 ret; @@ -2021,7 +2020,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) #endif /* atomic64_try_cmpxchg */ #ifndef atomic64_try_cmpxchg -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2034,7 +2033,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2047,7 +2046,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_release -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; 
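Every ordered variant in these fallbacks is derived by one mechanical recipe: acquire is the relaxed operation followed by __atomic_acquire_fence(), release is __atomic_release_fence() followed by the relaxed operation, and the fully ordered form is bracketed by __atomic_pre_full_fence() and __atomic_post_full_fence(). The acquire/release pairs exist to support message passing, as in this hedged sketch (ready, payload, compute() and consume() are hypothetical):

	/* producer: the payload store cannot pass the release store */
	payload = compute();
	atomic64_set_release(&ready, 1);

	/* consumer: the acquire load orders the flag read before the payload read */
	if (atomic64_read_acquire(&ready))
		consume(payload);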
@@ -2060,7 +2059,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_relaxed -static inline bool +static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2075,7 +2074,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) #else /* atomic64_try_cmpxchg_relaxed */ #ifndef atomic64_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); @@ -2086,7 +2085,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_release -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { __atomic_release_fence(); @@ -2096,7 +2095,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { bool ret; @@ -2120,7 +2119,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) * true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { return atomic64_sub_return(i, v) == 0; @@ -2137,7 +2136,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) * returns true if the result is 0, or false for all other * cases. */ -static inline bool +static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { return atomic64_dec_return(v) == 0; @@ -2154,7 +2153,7 @@ atomic64_dec_and_test(atomic64_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { return atomic64_inc_return(v) == 0; @@ -2172,7 +2171,7 @@ atomic64_inc_and_test(atomic64_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline bool +static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { return atomic64_add_return(i, v) < 0; @@ -2190,7 +2189,7 @@ atomic64_add_negative(s64 i, atomic64_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns original value of @v */ -static inline s64 +static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { s64 c = atomic64_read(v); @@ -2215,7 +2214,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) * Atomically adds @a to @v, if @v was not already @u. * Returns true if the addition was done. */ -static inline bool +static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { return atomic64_fetch_add_unless(v, a, u) != u; @@ -2231,7 +2230,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) * Atomically increments @v by 1, if @v is non-zero. * Returns true if the increment was done. 
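atomic64_dec_and_test() above is the classic last-reference primitive: when several CPUs drop references concurrently, exactly one of them observes the counter reach zero and becomes responsible for teardown. A hedged sketch, assuming a kernel context (struct obj is hypothetical; kfree() comes from <linux/slab.h>):

	struct obj {
		atomic64_t refs;
		/* ... payload ... */
	};

	static void obj_put(struct obj *o)
	{
		if (atomic64_dec_and_test(&o->refs))
			kfree(o);	/* exactly one caller sees zero and frees */
	}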
*/ -static inline bool +static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { return atomic64_add_unless(v, 1, 0); @@ -2240,7 +2239,7 @@ atomic64_inc_not_zero(atomic64_t *v) #endif #ifndef atomic64_inc_unless_negative -static inline bool +static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { s64 c = atomic64_read(v); @@ -2256,7 +2255,7 @@ atomic64_inc_unless_negative(atomic64_t *v) #endif #ifndef atomic64_dec_unless_positive -static inline bool +static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { s64 c = atomic64_read(v); @@ -2272,7 +2271,7 @@ atomic64_dec_unless_positive(atomic64_t *v) #endif #ifndef atomic64_dec_if_positive -static inline s64 +static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { s64 dec, c = atomic64_read(v); @@ -2288,8 +2287,5 @@ atomic64_dec_if_positive(atomic64_t *v) #define atomic64_dec_if_positive atomic64_dec_if_positive #endif -#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) -#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) - #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 25de4a2804d70f57e994fe3b419148658bb5378a +// 1fac0941c79bf0ae100723cc2ac9b94061f0b67a diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 4c0d009a46f0..571a11008ab5 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -25,6 +25,12 @@ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. */ +#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) + +#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) + /* * The idea here is to build acquire/release variants by adding explicit * barriers on top of the relaxed variant. 
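The cond_read macros, hoisted into atomic.h in this hunk so both fallback headers share one definition, spin until a condition on the atomic's value holds; within the condition expression, VAL names the most recently loaded value, exactly as with smp_cond_load_acquire(). A minimal sketch (the lock layout and LOCKED mask are hypothetical):

	/* wait, with acquire ordering, until the locked bit clears */
	atomic_cond_read_acquire(&lock->val, !(VAL & LOCKED));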
In the case where the relaxed @@ -71,7 +77,12 @@ __ret; \ }) +#ifdef ARCH_ATOMIC +#include <linux/atomic-arch-fallback.h> +#include <asm-generic/atomic-instrumented.h> +#else #include <linux/atomic-fallback.h> +#endif #include <asm-generic/atomic-long.h> diff --git a/include/linux/bch.h b/include/linux/bch.h index aa765af85c38..85fdce83d4e2 100644 --- a/include/linux/bch.h +++ b/include/linux/bch.h @@ -33,6 +33,7 @@ * @cache: log-based polynomial representation buffer * @elp: error locator polynomial * @poly_2t: temporary polynomials of degree 2t + * @swap_bits: swap bits within data and syndrome bytes */ struct bch_control { unsigned int m; @@ -51,16 +52,18 @@ struct bch_control { int *cache; struct gf_poly *elp; struct gf_poly *poly_2t[4]; + bool swap_bits; }; -struct bch_control *init_bch(int m, int t, unsigned int prim_poly); +struct bch_control *bch_init(int m, int t, unsigned int prim_poly, + bool swap_bits); -void free_bch(struct bch_control *bch); +void bch_free(struct bch_control *bch); -void encode_bch(struct bch_control *bch, const uint8_t *data, +void bch_encode(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc); -int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, +int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc); diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h index 8ed53d7524ea..e66b711d091e 100644 --- a/include/linux/bsearch.h +++ b/include/linux/bsearch.h @@ -4,7 +4,29 @@ #include <linux/types.h> -void *bsearch(const void *key, const void *base, size_t num, size_t size, - cmp_func_t cmp); +static __always_inline +void *__inline_bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp) +{ + const char *pivot; + int result; + + while (num > 0) { + pivot = base + (num >> 1) * size; + result = cmp(key, pivot); + + if (result == 0) + return (void *)pivot; + + if (result > 0) { + base = pivot + size; + num--; + } + num >>= 1; + } + + return NULL; +} + +extern void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp); #endif /* _LINUX_BSEARCH_H */ diff --git a/include/linux/cache.h b/include/linux/cache.h index 750621e41d1c..1aa8009f6d06 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -15,8 +15,14 @@ /* * __read_mostly is used to keep rarely changing variables out of frequently - * updated cachelines. If an architecture doesn't support it, ignore the - * hint. + * updated cachelines. Its use should be reserved for data that is used + * frequently in hot paths. Performance traces can help decide when to use + * this. You want __read_mostly data to be tightly packed, so that in the + * best case multiple frequently read variables for a hot path will be next + * to each other in order to reduce the number of cachelines needed to + * execute a critical path. We should be mindful and selective of its use. 
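To make the guidance in this rewritten comment concrete: __read_mostly moves the variable into a separate data section (".data..read_mostly" in current kernels) so that hot, rarely-written variables are packed together and do not share cachelines with frequently-written data. A hypothetical candidate:

	/* written once at module init, read on every fast-path call */
	static bool feature_enabled __read_mostly;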
+ * ie: if you're going to use it please supply a *good* justification in your + * commit log */ #ifndef __read_mostly #define __read_mostly diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index a954def26c0d..900b9f4e0605 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -34,7 +34,7 @@ struct can_skb_priv { int ifindex; int skbcnt; - struct can_frame cf[0]; + struct can_frame cf[]; }; static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb) diff --git a/include/linux/cb710.h b/include/linux/cb710.h index 60de3fedd3a7..405657a9a0d5 100644 --- a/include/linux/cb710.h +++ b/include/linux/cb710.h @@ -36,7 +36,7 @@ struct cb710_chip { unsigned slot_mask; unsigned slots; spinlock_t irq_lock; - struct cb710_slot slot[0]; + struct cb710_slot slot[]; }; /* NOTE: cb710_chip.slots is modified only during device init/exit and diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index 2b1b35240074..3f01d43f0598 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -131,6 +131,9 @@ extern void tegra210_set_sata_pll_seq_sw(bool state); extern void tegra210_put_utmipll_in_iddq(void); extern void tegra210_put_utmipll_out_iddq(void); extern int tegra210_clk_handle_mbist_war(unsigned int id); +extern void tegra210_clk_emc_dll_enable(bool flag); +extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value); +extern void tegra210_clk_emc_update_setting(u32 emc_src_value); struct clk; @@ -143,4 +146,28 @@ void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, void *cb_arg); int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same); +struct tegra210_clk_emc_config { + unsigned long rate; + bool same_freq; + u32 value; + + unsigned long parent_rate; + u8 parent; +}; + +struct tegra210_clk_emc_provider { + struct module *owner; + struct device *dev; + + struct tegra210_clk_emc_config *configs; + unsigned int num_configs; + + int (*set_rate)(struct device *dev, + const struct tegra210_clk_emc_config *config); +}; + +int tegra210_clk_emc_attach(struct clk *clk, + struct tegra210_clk_emc_provider *provider); +void tegra210_clk_emc_detach(struct clk *clk); + #endif /* __LINUX_CLK_TEGRA_H_ */ diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 790c0c6b8552..ee37256ec8bd 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -16,7 +16,7 @@ #define KASAN_ABI_VERSION 5 #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer) -/* emulate gcc's __SANITIZE_ADDRESS__ flag */ +/* Emulate GCC's __SANITIZE_ADDRESS__ flag */ #define __SANITIZE_ADDRESS__ #define __no_sanitize_address \ __attribute__((no_sanitize("address", "hwaddress"))) @@ -24,6 +24,15 @@ #define __no_sanitize_address #endif +#if __has_feature(thread_sanitizer) +/* emulate gcc's __SANITIZE_THREAD__ flag */ +#define __SANITIZE_THREAD__ +#define __no_sanitize_thread \ + __attribute__((no_sanitize("thread"))) +#else +#define __no_sanitize_thread +#endif + /* * Not all versions of clang implement the type-generic versions * of the builtin overflow checkers.
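The new thread_sanitizer block above mirrors the address_sanitizer one: when Clang builds with -fsanitize=thread (which is how KCSAN instruments code), __SANITIZE_THREAD__ is defined so generic code can test a single macro for both compilers, and __no_sanitize_thread exempts a function from that instrumentation. A hedged example of the annotation (function and message hypothetical):

	static __no_sanitize_thread void racy_debug_read(const int *p)
	{
		/* deliberately unsynchronized read; must not be instrumented */
		pr_debug("value=%d\n", *p);
	}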
Fortunately, clang implements diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index d7ee4c6bad48..7dd4e0349ef3 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -10,7 +10,8 @@ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) -#if GCC_VERSION < 40600 +/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */ +#if GCC_VERSION < 40800 # error Sorry, your compiler is too old - please upgrade it. #endif @@ -126,9 +127,7 @@ #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ -#if GCC_VERSION >= 40800 #define __HAVE_BUILTIN_BSWAP16__ -#endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ #if GCC_VERSION >= 70000 @@ -145,6 +144,12 @@ #define __no_sanitize_address #endif +#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__) +#define __no_sanitize_thread __attribute__((no_sanitize_thread)) +#else +#define __no_sanitize_thread +#endif + #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 6325d64e3c3b..30827f82ad62 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -230,60 +230,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) #endif -#include <uapi/linux/types.h> - -#define __READ_ONCE_SIZE \ -({ \ - switch (size) { \ - case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ - case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ - case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ - case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ - default: \ - barrier(); \ - __builtin_memcpy((void *)res, (const void *)p, size); \ - barrier(); \ - } \ -}) - -static __always_inline -void __read_once_size(const volatile void *p, void *res, int size) -{ - __READ_ONCE_SIZE; -} - -#ifdef CONFIG_KASAN -/* - * We can't declare function 'inline' because __no_sanitize_address confilcts - * with inlining. Attempt to inline it may cause a build failure. - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 - * '__maybe_unused' allows us to avoid defined-but-not-used warnings. - */ -# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused -#else -# define __no_kasan_or_inline __always_inline -#endif - -static __no_kasan_or_inline -void __read_once_size_nocheck(const volatile void *p, void *res, int size) -{ - __READ_ONCE_SIZE; -} - -static __always_inline void __write_once_size(volatile void *p, void *res, int size) -{ - switch (size) { - case 1: *(volatile __u8 *)p = *(__u8 *)res; break; - case 2: *(volatile __u16 *)p = *(__u16 *)res; break; - case 4: *(volatile __u32 *)p = *(__u32 *)res; break; - case 8: *(volatile __u64 *)p = *(__u64 *)res; break; - default: - barrier(); - __builtin_memcpy((void *)p, (const void *)res, size); - barrier(); - } -} - /* * Prevent the compiler from merging or refetching reads or writes. The * compiler is also forbidden from reordering successive instances of @@ -293,11 +239,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * statements. * * These two macros will also work on aggregate data types like structs or - * unions. If the size of the accessed data type exceeds the word size of - * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will - * fall back to memcpy(). 
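The hunks below replace the old size-switch/memcpy() implementation with a plain volatile access of an unqualified scalar type, and a new compiletime_assert_rwonce_type() (added later in this file) rejects accesses wider than 64 bits at build time. An illustrative consequence, assuming a 64-bit kernel build (types hypothetical):

	struct wide { u64 a, b; };	/* 16 bytes */
	u64 x;
	struct wide w;

	(void)READ_ONCE(x);	/* fine: 64-bit scalar */
	READ_ONCE(w);		/* build error: "Unsupported access size for {READ,WRITE}_ONCE()." */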
There's at least two memcpy()s: one for the - * __builtin_memcpy() and then one for the macro doing the copy of variable - * - '__u' allocated on the stack. + * unions. * * Their two major use cases are: (1) Mediating communication between * process-level code and irq/NMI handlers, all running on the same CPU, @@ -308,24 +250,79 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s */ #include <asm/barrier.h> #include <linux/kasan-checks.h> +#include <linux/kcsan-checks.h> + +/** + * data_race - mark an expression as containing intentional data races + * + * This data_race() macro is useful for situations in which data races + * should be forgiven. One example is diagnostic code that accesses + * shared variables but is not a part of the core synchronization design. + * + * This macro *does not* affect normal code generation, but is a hint + * to tooling that data races here are to be ignored. + */ +#define data_race(expr) \ +({ \ + __unqual_scalar_typeof(({ expr; })) __v = ({ \ + __kcsan_disable_current(); \ + expr; \ + }); \ + __kcsan_enable_current(); \ + __v; \ +}) + +/* + * Use __READ_ONCE() instead of READ_ONCE() if you do not require any + * atomicity or dependency ordering guarantees. Note that this may result + * in tears! + */ +#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x)) -#define __READ_ONCE(x, check) \ +#define __READ_ONCE_SCALAR(x) \ ({ \ - union { typeof(x) __val; char __c[1]; } __u; \ - if (check) \ - __read_once_size(&(x), __u.__c, sizeof(x)); \ - else \ - __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ - smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ - __u.__val; \ + __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \ + smp_read_barrier_depends(); \ + (typeof(x))__x; \ }) -#define READ_ONCE(x) __READ_ONCE(x, 1) + +#define READ_ONCE(x) \ +({ \ + compiletime_assert_rwonce_type(x); \ + __READ_ONCE_SCALAR(x); \ +}) + +#define __WRITE_ONCE(x, val) \ +do { \ + *(volatile typeof(x) *)&(x) = (val); \ +} while (0) + +#define WRITE_ONCE(x, val) \ +do { \ + compiletime_assert_rwonce_type(x); \ + __WRITE_ONCE(x, val); \ +} while (0) + +static __no_sanitize_or_inline +unsigned long __read_once_word_nocheck(const void *addr) +{ + return __READ_ONCE(*(unsigned long *)addr); +} /* - * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need - * to hide memory access from KASAN. + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a + * word from memory atomically but without telling KASAN/KCSAN. This is + * usually used by unwinding code when walking the stack of a running process. 
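data_race(), introduced above, evaluates its expression with KCSAN checking suspended around it: it does not change code generation or add any ordering, it only marks races on the enclosed accesses as intentional. Typical use is diagnostic code reading shared state outside the locks that protect updates, as in this hedged sketch (the stats structure and its locking are hypothetical):

	/* readers tolerate a stale value; writers hold stats_lock */
	pr_info("inflight requests: %lu\n", data_race(stats->inflight));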
*/ -#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) +#define READ_ONCE_NOCHECK(x) \ +({ \ + unsigned long __x; \ + compiletime_assert(sizeof(x) == sizeof(__x), \ + "Unsupported access size for READ_ONCE_NOCHECK()."); \ + __x = __read_once_word_nocheck(&(x)); \ + smp_read_barrier_depends(); \ + (typeof(x))__x; \ +}) static __no_kasan_or_inline unsigned long read_word_at_a_time(const void *addr) @@ -334,14 +331,6 @@ unsigned long read_word_at_a_time(const void *addr) return *(unsigned long *)addr; } -#define WRITE_ONCE(x, val) \ -({ \ - union { typeof(x) __val; char __c[1]; } __u = \ - { .__val = (__force typeof(x)) (val) }; \ - __write_once_size(&(x), __u.__c, sizeof(x)); \ - __u.__val; \ -}) - #endif /* __KERNEL__ */ /* @@ -406,6 +395,16 @@ static inline void *offset_to_ptr(const int *off) compiletime_assert(__native_word(t), \ "Need native word sized stores/loads for atomicity.") +/* + * Yes, this permits 64-bit accesses on 32-bit architectures. These will + * actually be atomic in some cases (namely Armv7 + LPAE), but for others we + * rely on the access being split into 2x32-bit accesses for a 32-bit quantity + * (e.g. a virtual address) and a strong prevailing wind. + */ +#define compiletime_assert_rwonce_type(t) \ + compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \ + "Unsupported access size for {READ,WRITE}_ONCE().") + /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 6fcf73200b67..21aed0981edf 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -171,6 +171,38 @@ struct ftrace_likely_data { */ #define noinline_for_stack noinline +/* + * Sanitizer helper attributes: Because using __always_inline and + * __no_sanitize_* conflict, provide helper attributes that will either expand + * to __no_sanitize_* in compilation units where instrumentation is enabled + * (__SANITIZE_*__), or __always_inline in compilation units without + * instrumentation (__SANITIZE_*__ undefined). + */ +#ifdef __SANITIZE_ADDRESS__ +/* + * We can't declare function 'inline' because __no_sanitize_address conflicts + * with inlining. Attempt to inline it may cause a build failure. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. + */ +# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused +# define __no_sanitize_or_inline __no_kasan_or_inline +#else +# define __no_kasan_or_inline __always_inline +#endif + +#define __no_kcsan __no_sanitize_thread +#ifdef __SANITIZE_THREAD__ +# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused +# define __no_sanitize_or_inline __no_kcsan_or_inline +#else +# define __no_kcsan_or_inline __always_inline +#endif + +#ifndef __no_sanitize_or_inline +#define __no_sanitize_or_inline __always_inline +#endif + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -218,6 +250,53 @@ struct ftrace_likely_data { /* Are two types/vars the same type (ignoring qualifiers)? */ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) +/* + * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving + * non-scalar types unchanged. 
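Two implementations of __unqual_scalar_typeof() follow. As a standalone illustration of the C11 _Generic technique the preferred variant relies on (plain user-space C, assuming a compiler that drops qualifiers from the controlling expression — precisely why the kernel keeps a fallback for older GCC and for sparse):

#include <stdio.h>

#define scalar_cases(type) \
	unsigned type: (unsigned type)0, \
	signed type: (signed type)0

/* Map an expression to a zero of the matching unqualified scalar type,
 * then take typeof() of that value. 'char' is type-compatible with
 * neither 'signed char' nor 'unsigned char', hence its own case. */
#define unqual_scalar_typeof(x) __typeof__( \
	_Generic((x), \
		char: (char)0, \
		scalar_cases(char), \
		scalar_cases(short), \
		scalar_cases(int), \
		scalar_cases(long), \
		scalar_cases(long long), \
		default: (x)))

int main(void)
{
	const volatile int v = 42;
	unqual_scalar_typeof(v) copy = v; /* 'copy' is a plain int */

	printf("%d\n", copy);
	return 0;
}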
+ */ +#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__) +/* + * We build this out of a couple of helper macros in a vain attempt to + * help you keep your lunch down while reading it. + */ +#define __pick_scalar_type(x, type, otherwise) \ + __builtin_choose_expr(__same_type(x, type), (type)0, otherwise) + +/* + * 'char' is not type-compatible with either 'signed char' or 'unsigned char', + * so we include the naked type here as well as the signed/unsigned variants. + */ +#define __pick_integer_type(x, type, otherwise) \ + __pick_scalar_type(x, type, \ + __pick_scalar_type(x, unsigned type, \ + __pick_scalar_type(x, signed type, otherwise))) + +#define __unqual_scalar_typeof(x) typeof( \ + __pick_integer_type(x, char, \ + __pick_integer_type(x, short, \ + __pick_integer_type(x, int, \ + __pick_integer_type(x, long, \ + __pick_integer_type(x, long long, x)))))) +#else +/* + * If supported, prefer C11 _Generic for better compile-times. As above, 'char' + * is not type-compatible with 'signed char', and we define a separate case. + */ +#define __scalar_type_to_expr_cases(type) \ + unsigned type: (unsigned type)0, \ + signed type: (signed type)0 + +#define __unqual_scalar_typeof(x) typeof( \ + _Generic((x), \ + char: (char)0, \ + __scalar_type_to_expr_cases(char), \ + __scalar_type_to_expr_cases(short), \ + __scalar_type_to_expr_cases(int), \ + __scalar_type_to_expr_cases(long), \ + __scalar_type_to_expr_cases(long long), \ + default: (x))) +#endif + /* Is this type a native word size -- useful for atomic operations */ #define __native_word(t) \ (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 8cac62ee6add..981b880d5b60 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -33,13 +33,13 @@ static inline void user_exit(void) } /* Called with interrupts disabled. */ -static inline void user_enter_irqoff(void) +static __always_inline void user_enter_irqoff(void) { if (context_tracking_enabled()) __context_tracking_enter(CONTEXT_USER); } -static inline void user_exit_irqoff(void) +static __always_inline void user_exit_irqoff(void) { if (context_tracking_enabled()) __context_tracking_exit(CONTEXT_USER); @@ -75,7 +75,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) * is enabled. If context tracking is disabled, returns * CONTEXT_DISABLED. This should be used primarily for debugging. */ -static inline enum ctx_state ct_state(void) +static __always_inline enum ctx_state ct_state(void) { return context_tracking_enabled() ? 
this_cpu_read(context_tracking.state) : CONTEXT_DISABLED; diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h index e7fe6678b7ad..65a60d3313b0 100644 --- a/include/linux/context_tracking_state.h +++ b/include/linux/context_tracking_state.h @@ -26,12 +26,12 @@ struct context_tracking { extern struct static_key_false context_tracking_key; DECLARE_PER_CPU(struct context_tracking, context_tracking); -static inline bool context_tracking_enabled(void) +static __always_inline bool context_tracking_enabled(void) { return static_branch_unlikely(&context_tracking_key); } -static inline bool context_tracking_enabled_cpu(int cpu) +static __always_inline bool context_tracking_enabled_cpu(int cpu) { return context_tracking_enabled() && per_cpu(context_tracking.active, cpu); } @@ -41,7 +41,7 @@ static inline bool context_tracking_enabled_this_cpu(void) return context_tracking_enabled() && __this_cpu_read(context_tracking.active); } -static inline bool context_tracking_in_user(void) +static __always_inline bool context_tracking_in_user(void) { return __this_cpu_read(context_tracking.state) == CONTEXT_USER; } diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h index 65501d8f9778..a3bdc8a98f2c 100644 --- a/include/linux/cpu_cooling.h +++ b/include/linux/cpu_cooling.h @@ -63,18 +63,10 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy) struct cpuidle_driver; #ifdef CONFIG_CPU_IDLE_THERMAL -int cpuidle_cooling_register(struct cpuidle_driver *drv); -int cpuidle_of_cooling_register(struct device_node *np, - struct cpuidle_driver *drv); +void cpuidle_cooling_register(struct cpuidle_driver *drv); #else /* CONFIG_CPU_IDLE_THERMAL */ -static inline int cpuidle_cooling_register(struct cpuidle_driver *drv) +static inline void cpuidle_cooling_register(struct cpuidle_driver *drv) { - return 0; -} -static inline int cpuidle_of_cooling_register(struct device_node *np, - struct cpuidle_driver *drv) -{ - return 0; } #endif /* CONFIG_CPU_IDLE_THERMAL */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 67d5950bd878..3494f6763597 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -367,7 +367,7 @@ struct cpufreq_driver { /* platform specific boost support code */ bool boost_enabled; - int (*set_boost)(int state); + int (*set_boost)(struct cpufreq_policy *policy, int state); }; /* flags */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 8377afef8806..191772d4a4d7 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -102,6 +102,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, + CPUHP_AP_IRQ_RISCV_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_MICROCODE_LOADER, diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index bc156285d097..a5192b718dbe 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -5,9 +5,10 @@ #include <linux/kexec.h> #include <linux/proc_fs.h> #include <linux/elf.h> +#include <linux/pgtable.h> #include <uapi/linux/vmcore.h> -#include <asm/pgtable.h> /* for pgprot_t */ +#include <linux/pgtable.h> /* for pgprot_t */ #ifdef CONFIG_CRASH_DUMP #define ELFCORE_ADDR_MAX (-1ULL) diff --git a/include/linux/dax.h b/include/linux/dax.h index d7af5d243f24..6904d4e0b2e0 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -5,7 +5,6 @@ #include <linux/fs.h> #include <linux/mm.h> #include <linux/radix-tree.h> 
-#include <asm/pgtable.h> /* Flag for synchronous flush */ #define DAXDEV_F_SYNC (1UL << 0) diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 257ab3c92cb8..e7e45f0cc7da 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -12,7 +12,7 @@ extern int debug_locks __read_mostly; extern int debug_locks_silent __read_mostly; -static inline int __debug_locks_off(void) +static __always_inline int __debug_locks_off(void) { return xchg(&debug_locks, 0); } diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index b59f1b6be3e9..ca09a4e07d2d 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -3,7 +3,7 @@ #define _LINUX_DMA_NONCOHERENT_H 1 #include <linux/dma-mapping.h> -#include <asm/pgtable.h> +#include <linux/pgtable.h> #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H #include <asm/dma-coherence.h> diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index e1c03339918f..6283917edd90 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -153,7 +153,7 @@ struct dma_interleaved_template { bool dst_sgl; size_t numf; size_t frame_size; - struct data_chunk sgl[0]; + struct data_chunk sgl[]; }; /** @@ -535,7 +535,7 @@ struct dmaengine_unmap_data { struct device *dev; struct kref kref; size_t len; - dma_addr_t addr[0]; + dma_addr_t addr[]; }; struct dma_async_tx_descriptor; diff --git a/include/linux/edac.h b/include/linux/edac.h index 0f20b986b0ab..6eb7d55d7c3d 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -31,14 +31,6 @@ struct device; extern int edac_op_state; struct bus_type *edac_get_sysfs_subsys(void); -int edac_get_report_status(void); -void edac_set_report_status(int new); - -enum { - EDAC_REPORTING_ENABLED, - EDAC_REPORTING_DISABLED, - EDAC_REPORTING_FORCE -}; static inline void opstate_init(void) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 01f5d296f9bb..6c4ab4dc1cd7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1048,6 +1048,7 @@ struct lock_manager_operations { bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); + bool (*lm_breaker_owns_lease)(struct file_lock *); }; struct lock_manager { @@ -1412,6 +1413,8 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 #define SB_I_UNTRUSTED_MOUNTER 0x00000040 +#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ + /* Possible states of 'frozen' field */ enum { SB_UNFROZEN = 0, /* FS is unfrozen */ @@ -1679,10 +1682,10 @@ static inline int sb_start_write_trylock(struct super_block *sb) * * Since page fault freeze protection behaves as a lock, users have to preserve * ordering of freeze protection and other filesystem locks. It is advised to - * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault + * put sb_start_pagefault() close to mmap_lock in lock ordering. 
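The [0]-to-[] conversions in this series switch to C99 flexible array members, which pair with struct_size() for overflow-safe allocation. A sketch (kernel context assumed; dmaengine has its own allocation path, this is purely illustrative):

#include <linux/overflow.h>
#include <linux/slab.h>

/*
 * struct_size(u, addr, nr) computes sizeof(*u) + nr * sizeof(u->addr[0])
 * with overflow checking -- something the old one-element addr[0] idiom
 * made easy to get wrong.
 */
static struct dmaengine_unmap_data *alloc_unmap_sketch(int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *u;

	u = kzalloc(struct_size(u, addr, nr), flags);
	return u;	/* caller must check for NULL */
}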
Page fault * handling code implies lock dependency: * - * mmap_sem + * mmap_lock * -> sb_start_pagefault */ static inline void sb_start_pagefault(struct super_block *sb) @@ -3201,6 +3204,8 @@ enum { DIO_SKIP_HOLES = 0x02, }; +void dio_end_io(struct bio *bio); + ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, get_block_t get_block, diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index ce0b5fbf239d..3f0b19dcfae7 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -46,7 +46,7 @@ struct fscache_cache_tag { unsigned long flags; #define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ atomic_t usage; - char name[0]; /* tag name */ + char name[]; /* tag name */ }; /* diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index dfbbf7a7208b..e339dac91ee6 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -342,9 +342,8 @@ static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, extern int stack_tracer_enabled; -int stack_trace_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */ DECLARE_PER_CPU(int, disable_stack_tracer); diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index e07cf853aa16..03c9fece7d43 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -38,9 +38,24 @@ static __always_inline void rcu_irq_enter_check_tick(void) } while (0) /* + * Like __irq_enter() without time accounting for fast + * interrupts, e.g. reschedule IPI where time accounting + * is more expensive than the actual interrupt. + */ +#define __irq_enter_raw() \ + do { \ + preempt_count_add(HARDIRQ_OFFSET); \ + lockdep_hardirq_enter(); \ + } while (0) + +/* * Enter irq context (on NO_HZ, update jiffies): */ -extern void irq_enter(void); +void irq_enter(void); +/* + * Like irq_enter(), but RCU is already watching. + */ +void irq_enter_rcu(void); /* * Exit irq context without processing softirqs: @@ -53,9 +68,23 @@ extern void irq_enter(void); } while (0) /* + * Like __irq_exit() without time accounting + */ +#define __irq_exit_raw() \ + do { \ + lockdep_hardirq_exit(); \ + preempt_count_sub(HARDIRQ_OFFSET); \ + } while (0) + +/* * Exit irq context and process softirqs if needed: */ -extern void irq_exit(void); +void irq_exit(void); + +/* + * Like irq_exit(), but return with RCU watching. 
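The raw entry/exit variants above are aimed at interrupts where time accounting costs more than the handler itself. A sketch of such a path (kernel context assumed; the handler is hypothetical):

/* Reschedule-IPI style handler: skip irqtime accounting entirely. */
static void handle_resched_ipi_sketch(void)
{
	__irq_enter_raw();
	/* minimal work only, e.g. folding the need-resched flag */
	__irq_exit_raw();
}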
+ */ +void irq_exit_rcu(void); #ifndef arch_nmi_enter #define arch_nmi_enter() do { } while (0) @@ -87,20 +116,24 @@ extern void rcu_nmi_exit(void); arch_nmi_enter(); \ printk_nmi_enter(); \ lockdep_off(); \ - ftrace_nmi_enter(); \ BUG_ON(in_nmi() == NMI_MASK); \ __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ rcu_nmi_enter(); \ lockdep_hardirq_enter(); \ + instrumentation_begin(); \ + ftrace_nmi_enter(); \ + instrumentation_end(); \ } while (0) #define nmi_exit() \ do { \ + instrumentation_begin(); \ + ftrace_nmi_exit(); \ + instrumentation_end(); \ lockdep_hardirq_exit(); \ rcu_nmi_exit(); \ BUG_ON(!in_nmi()); \ __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ - ftrace_nmi_exit(); \ lockdep_on(); \ printk_nmi_exit(); \ arch_nmi_exit(); \ diff --git a/include/linux/hmm.h b/include/linux/hmm.h index e912b9dc4633..f4a09ed223ac 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -10,7 +10,7 @@ #define LINUX_HMM_H #include <linux/kconfig.h> -#include <asm/pgtable.h> +#include <linux/pgtable.h> #include <linux/device.h> #include <linux/migrate.h> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index cfbb0a87c5f0..71f20776b06c 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -248,7 +248,7 @@ static inline int is_swap_pmd(pmd_t pmd) return !pmd_none(pmd) && !pmd_present(pmd); } -/* mmap_sem must be held on entry */ +/* mmap_lock must be held on entry */ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 0cced410e0bd..50650d0d01b9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -9,7 +9,7 @@ #include <linux/cgroup.h> #include <linux/list.h> #include <linux/kref.h> -#include <asm/pgtable.h> +#include <linux/pgtable.h> struct ctl_table; struct user_struct; diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 8c5459034f92..1e4e0de4ef8b 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -2,7 +2,7 @@ /* * i2c-smbus.h - SMBus extensions to the I2C protocol * - * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de> + * Copyright (C) 2010-2019 Jean Delvare <jdelvare@suse.de> */ #ifndef _LINUX_I2C_SMBUS_H @@ -39,4 +39,10 @@ static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap) } #endif +#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI) +void i2c_register_spd(struct i2c_adapter *adap); +#else +static inline void i2c_register_spd(struct i2c_adapter *adap) { } +#endif + #endif /* _LINUX_I2C_SMBUS_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 49d29054e657..c10617bb980a 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -351,14 +351,14 @@ static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) return to_i2c_client(dev); } -static inline void *i2c_get_clientdata(const struct i2c_client *dev) +static inline void *i2c_get_clientdata(const struct i2c_client *client) { - return dev_get_drvdata(&dev->dev); + return dev_get_drvdata(&client->dev); } -static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) +static inline void i2c_set_clientdata(struct i2c_client *client, void *data) { - dev_set_drvdata(&dev->dev, data); + dev_set_drvdata(&client->dev, data); } /* I2C slave support */ diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h index a445cd1a36c5..91a8612b8bf9 100644 --- a/include/linux/idle_inject.h +++ b/include/linux/idle_inject.h @@ -26,4 +26,8 @@ void idle_inject_set_duration(struct 
idle_inject_device *ii_dev, void idle_inject_get_duration(struct idle_inject_device *ii_dev, unsigned int *run_duration_us, unsigned int *idle_duration_us); + +void idle_inject_set_latency(struct idle_inject_device *ii_dev, + unsigned int latency_ns); + #endif /* __IDLE_INJECT_H__ */ diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h deleted file mode 100644 index 3614a13a8297..000000000000 --- a/include/linux/input/gp2ap002a00f.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _GP2AP002A00F_H_ -#define _GP2AP002A00F_H_ - -#include <linux/i2c.h> - -#define GP2A_I2C_NAME "gp2ap002a00f" - -/** - * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data - * @vout_gpio: The gpio connected to the object detected pin (VOUT) - * @wakeup: Set to true if the proximity can wake the device from suspend - * @hw_setup: Callback for setting up hardware such as gpios and vregs - * @hw_shutdown: Callback for properly shutting down hardware - */ -struct gp2a_platform_data { - int vout_gpio; - bool wakeup; - int (*hw_setup)(struct i2c_client *client); - int (*hw_shutdown)(struct i2c_client *client); -}; - -#endif diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h index 9e409bb13642..3b8580bd33c1 100644 --- a/include/linux/input/mt.h +++ b/include/linux/input/mt.h @@ -100,6 +100,11 @@ static inline bool input_is_mt_axis(int axis) bool input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active); +static inline void input_mt_report_slot_inactive(struct input_dev *dev) +{ + input_mt_report_slot_state(dev, 0, false); +} + void input_mt_report_finger_count(struct input_dev *dev, int count); void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count); void input_mt_drop_unused(struct input_dev *dev); diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h new file mode 100644 index 000000000000..43e6ea591975 --- /dev/null +++ b/include/linux/instrumented.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * This header provides generic wrappers for memory access instrumentation that + * the compiler cannot emit for: KASAN, KCSAN. + */ +#ifndef _LINUX_INSTRUMENTED_H +#define _LINUX_INSTRUMENTED_H + +#include <linux/compiler.h> +#include <linux/kasan-checks.h> +#include <linux/kcsan-checks.h> +#include <linux/types.h> + +/** + * instrument_read - instrument regular read access + * + * Instrument a regular read access. The instrumentation should be inserted + * before the actual read happens. + * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_read(const volatile void *v, size_t size) +{ + kasan_check_read(v, size); + kcsan_check_read(v, size); +} + +/** + * instrument_write - instrument regular write access + * + * Instrument a regular write access. The instrumentation should be inserted + * before the actual write happens. + * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_write(const volatile void *v, size_t size) +{ + kasan_check_write(v, size); + kcsan_check_write(v, size); +} + +/** + * instrument_atomic_read - instrument atomic read access + * + * Instrument an atomic read access. The instrumentation should be inserted + * before the actual read happens. 
+ * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_atomic_read(const volatile void *v, size_t size) +{ + kasan_check_read(v, size); + kcsan_check_atomic_read(v, size); +} + +/** + * instrument_atomic_write - instrument atomic write access + * + * Instrument an atomic write access. The instrumentation should be inserted + * before the actual write happens. + * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_atomic_write(const volatile void *v, size_t size) +{ + kasan_check_write(v, size); + kcsan_check_atomic_write(v, size); +} + +/** + * instrument_copy_to_user - instrument reads of copy_to_user + * + * Instrument reads from kernel memory, that are due to copy_to_user (and + * variants). The instrumentation must be inserted before the accesses. + * + * @to destination address + * @from source address + * @n number of bytes to copy + */ +static __always_inline void +instrument_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + kasan_check_read(from, n); + kcsan_check_read(from, n); +} + +/** + * instrument_copy_from_user - instrument writes of copy_from_user + * + * Instrument writes to kernel memory, that are due to copy_from_user (and + * variants). The instrumentation should be inserted before the accesses. + * + * @to destination address + * @from source address + * @n number of bytes to copy + */ +static __always_inline void +instrument_copy_from_user(const void *to, const void __user *from, unsigned long n) +{ + kasan_check_write(to, n); + kcsan_check_write(to, n); +} + +#endif /* _LINUX_INSTRUMENTED_H */ diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h index d8c29049f066..3a63d98613fc 100644 --- a/include/linux/interconnect.h +++ b/include/linux/interconnect.h @@ -35,6 +35,7 @@ int icc_enable(struct icc_path *path); int icc_disable(struct icc_path *path); int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); void icc_set_tag(struct icc_path *path, u32 tag); +const char *icc_get_name(struct icc_path *path); #else @@ -84,6 +85,11 @@ static inline void icc_set_tag(struct icc_path *path, u32 tag) { } +static inline const char *icc_get_name(struct icc_path *path) +{ + return NULL; +} + #endif /* CONFIG_INTERCONNECT */ #endif /* __LINUX_INTERCONNECT_H */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 80f637c3a6f3..5db970b6615a 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -760,8 +760,10 @@ extern int arch_early_irq_init(void); /* * We want to know which function is an entrypoint of a hardirq or a softirq. 
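The instrumented.h wrappers are meant to be called immediately before the real access. A minimal sketch of both call patterns (kernel context assumed; to my understanding this mirrors how the generated atomic wrappers and the uaccess helpers consume these hooks, but the function names here are illustrative):

/* Atomic op: report the access, then perform it via the arch_ op. */
static __always_inline int atomic_read_instrumented(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

/* Usercopy: only the kernel-side buffer access is checked. */
static __always_inline unsigned long
copy_to_user_instrumented(void __user *to, const void *from, unsigned long n)
{
	instrument_copy_to_user(to, from, n);
	return raw_copy_to_user(to, from, n);
}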
*/ -#define __irq_entry __attribute__((__section__(".irqentry.text"))) -#define __softirq_entry \ - __attribute__((__section__(".softirqentry.text"))) +#ifndef __irq_entry +# define __irq_entry __attribute__((__section__(".irqentry.text"))) +#endif + +#define __softirq_entry __attribute__((__section__(".softirqentry.text"))) #endif diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index b336622612f3..0beaa3eba155 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/bug.h> #include <linux/io.h> +#include <linux/pgtable.h> #include <asm/page.h> /* @@ -99,7 +100,6 @@ io_mapping_unmap(void __iomem *vaddr) #else #include <linux/uaccess.h> -#include <asm/pgtable.h> /* Create the io_mapping object*/ static inline struct io_mapping * diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index d7f7e436c3af..6384d2813ded 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -32,7 +32,7 @@ #ifdef CONFIG_TRACE_IRQFLAGS extern void trace_hardirqs_on_prepare(void); - extern void trace_hardirqs_off_prepare(void); + extern void trace_hardirqs_off_finish(void); extern void trace_hardirqs_on(void); extern void trace_hardirqs_off(void); # define lockdep_hardirq_context(p) ((p)->hardirq_context) @@ -101,7 +101,7 @@ do { \ #else # define trace_hardirqs_on_prepare() do { } while (0) -# define trace_hardirqs_off_prepare() do { } while (0) +# define trace_hardirqs_off_finish() do { } while (0) # define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_off() do { } while (0) # define lockdep_hardirq_context(p) 0 diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index f613d8529863..d56128df2aff 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -766,6 +766,11 @@ struct journal_s int j_errno; /** + * @j_abort_mutex: Lock the whole aborting procedure. + */ + struct mutex j_abort_mutex; + + /** * @j_sb_buffer: The first part of the superblock buffer. 
*/ struct buffer_head *j_sb_buffer; @@ -1247,7 +1252,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file * data write error in ordered * mode */ -#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */ /* * Function declarations for the journaling transaction and buffer diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 657a83b943f0..98338dc6b5d2 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -165,9 +165,9 @@ static inline int kallsyms_show_value(void) #endif /*CONFIG_KALLSYMS*/ -static inline void print_ip_sym(unsigned long ip) +static inline void print_ip_sym(const char *loglvl, unsigned long ip) { - printk("[<%px>] %pS\n", (void *) ip, (void *) ip); + printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip); } #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 31314ca7c635..82522e996c76 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -11,8 +11,8 @@ struct task_struct; #ifdef CONFIG_KASAN +#include <linux/pgtable.h> #include <asm/kasan.h> -#include <asm/pgtable.h> extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE]; diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h new file mode 100644 index 000000000000..7b0b9c44f5f3 --- /dev/null +++ b/include/linux/kcsan-checks.h @@ -0,0 +1,430 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_KCSAN_CHECKS_H +#define _LINUX_KCSAN_CHECKS_H + +/* Note: Only include what is already included by compiler.h. */ +#include <linux/compiler_attributes.h> +#include <linux/types.h> + +/* + * ACCESS TYPE MODIFIERS + * + * <none>: normal read access; + * WRITE : write access; + * ATOMIC: access is atomic; + * ASSERT: access is not a regular access, but an assertion; + * SCOPED: access is a scoped access; + */ +#define KCSAN_ACCESS_WRITE 0x1 +#define KCSAN_ACCESS_ATOMIC 0x2 +#define KCSAN_ACCESS_ASSERT 0x4 +#define KCSAN_ACCESS_SCOPED 0x8 + +/* + * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used + * even in compilation units that selectively disable KCSAN, but must use KCSAN + * to validate access to an address. Never use these in header files! + */ +#ifdef CONFIG_KCSAN +/** + * __kcsan_check_access - check generic access for races + * + * @ptr: address of access + * @size: size of access + * @type: access type modifier + */ +void __kcsan_check_access(const volatile void *ptr, size_t size, int type); + +/** + * kcsan_disable_current - disable KCSAN for the current context + * + * Supports nesting. + */ +void kcsan_disable_current(void); + +/** + * kcsan_enable_current - re-enable KCSAN for the current context + * + * Supports nesting. + */ +void kcsan_enable_current(void); +void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */ + +/** + * kcsan_nestable_atomic_begin - begin nestable atomic region + * + * Accesses within the atomic region may appear to race with other accesses but + * should be considered atomic. + */ +void kcsan_nestable_atomic_begin(void); + +/** + * kcsan_nestable_atomic_end - end nestable atomic region + */ +void kcsan_nestable_atomic_end(void); + +/** + * kcsan_flat_atomic_begin - begin flat atomic region + * + * Accesses within the atomic region may appear to race with other accesses but + * should be considered atomic. 
+ */ +void kcsan_flat_atomic_begin(void); + +/** + * kcsan_flat_atomic_end - end flat atomic region + */ +void kcsan_flat_atomic_end(void); + +/** + * kcsan_atomic_next - consider following accesses as atomic + * + * Force treating the next n memory accesses for the current context as atomic + * operations. + * + * @n: number of following memory accesses to treat as atomic. + */ +void kcsan_atomic_next(int n); + +/** + * kcsan_set_access_mask - set access mask + * + * Set the access mask for all accesses for the current context if non-zero. + * Only value changes to bits set in the mask will be reported. + * + * @mask: bitmask + */ +void kcsan_set_access_mask(unsigned long mask); + +/* Scoped access information. */ +struct kcsan_scoped_access { + struct list_head list; + const volatile void *ptr; + size_t size; + int type; +}; +/* + * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes + * out of scope; relies on attribute "cleanup", which is supported by all + * compilers that support KCSAN. + */ +#define __kcsan_cleanup_scoped \ + __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access))) + +/** + * kcsan_begin_scoped_access - begin scoped access + * + * Begin scoped access and initialize @sa, which will cause KCSAN to + * continuously check the memory range in the current thread until + * kcsan_end_scoped_access() is called for @sa. + * + * Scoped accesses are implemented by appending @sa to an internal list for the + * current execution context, and then checked on every call into the KCSAN + * runtime. + * + * @ptr: address of access + * @size: size of access + * @type: access type modifier + * @sa: struct kcsan_scoped_access to use for the scope of the access + */ +struct kcsan_scoped_access * +kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, + struct kcsan_scoped_access *sa); + +/** + * kcsan_end_scoped_access - end scoped access + * + * End a scoped access, which will stop KCSAN checking the memory range. + * Requires that kcsan_begin_scoped_access() was previously called once for @sa. + * + * @sa: a previously initialized struct kcsan_scoped_access + */ +void kcsan_end_scoped_access(struct kcsan_scoped_access *sa); + + +#else /* CONFIG_KCSAN */ + +static inline void __kcsan_check_access(const volatile void *ptr, size_t size, + int type) { } + +static inline void kcsan_disable_current(void) { } +static inline void kcsan_enable_current(void) { } +static inline void kcsan_enable_current_nowarn(void) { } +static inline void kcsan_nestable_atomic_begin(void) { } +static inline void kcsan_nestable_atomic_end(void) { } +static inline void kcsan_flat_atomic_begin(void) { } +static inline void kcsan_flat_atomic_end(void) { } +static inline void kcsan_atomic_next(int n) { } +static inline void kcsan_set_access_mask(unsigned long mask) { } + +struct kcsan_scoped_access { }; +#define __kcsan_cleanup_scoped __maybe_unused +static inline struct kcsan_scoped_access * +kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, + struct kcsan_scoped_access *sa) { return sa; } +static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { } + +#endif /* CONFIG_KCSAN */ + +#ifdef __SANITIZE_THREAD__ +/* + * Only calls into the runtime when the particular compilation unit has KCSAN + * instrumentation enabled. May be used in header files. 
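A sketch of the flat atomic region hooks in use (kernel context assumed; the reader function is hypothetical, though seqlock-style read sections are the motivating user per the kcsan.h comment further below):

/* Seqcount-style read section: the racy reads inside are intentional. */
static unsigned int read_snapshot_sketch(const unsigned int *a,
					 const unsigned int *b)
{
	unsigned int sum;

	kcsan_flat_atomic_begin();
	sum = *a + *b;		/* may race with a writer; tolerated */
	kcsan_flat_atomic_end();
	return sum;
}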
+ */ +#define kcsan_check_access __kcsan_check_access + +/* + * Only use these to disable KCSAN for accesses in the current compilation unit; + * calls into libraries may still perform KCSAN checks. + */ +#define __kcsan_disable_current kcsan_disable_current +#define __kcsan_enable_current kcsan_enable_current_nowarn +#else +static inline void kcsan_check_access(const volatile void *ptr, size_t size, + int type) { } +static inline void __kcsan_enable_current(void) { } +static inline void __kcsan_disable_current(void) { } +#endif + +/** + * __kcsan_check_read - check regular read access for races + * + * @ptr: address of access + * @size: size of access + */ +#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0) + +/** + * __kcsan_check_write - check regular write access for races + * + * @ptr: address of access + * @size: size of access + */ +#define __kcsan_check_write(ptr, size) \ + __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE) + +/** + * kcsan_check_read - check regular read access for races + * + * @ptr: address of access + * @size: size of access + */ +#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0) + +/** + * kcsan_check_write - check regular write access for races + * + * @ptr: address of access + * @size: size of access + */ +#define kcsan_check_write(ptr, size) \ + kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE) + +/* + * Check for atomic accesses: if atomic accesses are not ignored, this simply + * aliases to kcsan_check_access(), otherwise becomes a no-op. + */ +#ifdef CONFIG_KCSAN_IGNORE_ATOMICS +#define kcsan_check_atomic_read(...) do { } while (0) +#define kcsan_check_atomic_write(...) do { } while (0) +#else +#define kcsan_check_atomic_read(ptr, size) \ + kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC) +#define kcsan_check_atomic_write(ptr, size) \ + kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE) +#endif + +/** + * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var + * + * Assert that there are no concurrent writes to @var; other readers are + * allowed. This assertion can be used to specify properties of concurrent code, + * where violation cannot be detected as a normal data race. + * + * For example, if we only have a single writer, but multiple concurrent + * readers, to avoid data races, all these accesses must be marked; even + * concurrent marked writes racing with the single writer are bugs. + * Unfortunately, due to being marked, they are no longer data races. For cases + * like these, we can use the macro as follows: + * + * .. code-block:: c + * + * void writer(void) { + * spin_lock(&update_foo_lock); + * ASSERT_EXCLUSIVE_WRITER(shared_foo); + * WRITE_ONCE(shared_foo, ...); + * spin_unlock(&update_foo_lock); + * } + * void reader(void) { + * // update_foo_lock does not need to be held! + * ... = READ_ONCE(shared_foo); + * } + * + * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough + * checking if a clear scope where no concurrent writes are expected exists. + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_WRITER(var) \ + __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT) + +/* + * Helper macros for implementation of for ASSERT_EXCLUSIVE_*_SCOPED(). @id is + * expected to be unique for the scope in which instances of kcsan_scoped_access + * are declared. 
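Before the convenience macros below, a sketch of driving the scoped-access API by hand (kernel context assumed; the update function is hypothetical):

/* Assert exclusivity of *cfg across a whole multi-step update. */
static void update_config_sketch(unsigned long *cfg)
{
	struct kcsan_scoped_access sa;

	kcsan_begin_scoped_access(cfg, sizeof(*cfg),
				  KCSAN_ACCESS_ASSERT, &sa);
	/* ... several writes; a concurrent access would be reported
	 * at any point until the scope is ended ... */
	kcsan_end_scoped_access(&sa);
}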
+ */ +#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix +#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \ + struct kcsan_scoped_access __kcsan_scoped_name(id, _) \ + __kcsan_cleanup_scoped; \ + struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \ + __maybe_unused = kcsan_begin_scoped_access( \ + &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \ + &__kcsan_scoped_name(id, _)) + +/** + * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope + * + * Scoped variant of ASSERT_EXCLUSIVE_WRITER(). + * + * Assert that there are no concurrent writes to @var for the duration of the + * scope in which it is introduced. This provides a better way to fully cover + * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and + * increases the likelihood for KCSAN to detect racing accesses. + * + * For example, it allows finding race-condition bugs that only occur due to + * state changes within the scope itself: + * + * .. code-block:: c + * + * void writer(void) { + * spin_lock(&update_foo_lock); + * { + * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo); + * WRITE_ONCE(shared_foo, 42); + * ... + * // shared_foo should still be 42 here! + * } + * spin_unlock(&update_foo_lock); + * } + * void buggy(void) { + * if (READ_ONCE(shared_foo) == 42) + * WRITE_ONCE(shared_foo, 1); // bug! + * } + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \ + __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__) + +/** + * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var + * + * Assert that there are no concurrent accesses to @var (no readers nor + * writers). This assertion can be used to specify properties of concurrent + * code, where violation cannot be detected as a normal data race. + * + * For example, where exclusive access is expected after determining no other + * users of an object are left, but the object is not actually freed. We can + * check that this property actually holds as follows: + * + * .. code-block:: c + * + * if (refcount_dec_and_test(&obj->refcnt)) { + * ASSERT_EXCLUSIVE_ACCESS(*obj); + * do_some_cleanup(obj); + * release_for_reuse(obj); + * } + * + * Note: ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough + * checking if a clear scope where no concurrent accesses are expected exists. + * + * Note: For cases where the object is freed, `KASAN <kasan.html>`_ is a better + * fit to detect use-after-free bugs. + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_ACCESS(var) \ + __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT) + +/** + * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope + * + * Scoped variant of ASSERT_EXCLUSIVE_ACCESS(). + * + * Assert that there are no concurrent accesses to @var (no readers nor writers) + * for the entire duration of the scope in which it is introduced. This provides + * a better way to fully cover the enclosing scope, compared to multiple + * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect + * racing accesses. + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \ + __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__) + +/** + * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var + * + * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER(). 
+ * + * Assert that there are no concurrent writes to a subset of bits in @var; + * concurrent readers are permitted. This assertion captures more detailed + * bit-level properties, compared to the other (word granularity) assertions. + * Only the bits set in @mask are checked for concurrent modifications, while + * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits + * are ignored. + * + * Use this for variables, where some bits must not be modified concurrently, + * yet other bits are expected to be modified concurrently. + * + * For example, variables where, after initialization, some bits are read-only, + * but other bits may still be modified concurrently. A reader may wish to + * assert that this is true as follows: + * + * .. code-block:: c + * + * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK); + * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT; + * + * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed + * to access the masked bits only, and KCSAN optimistically assumes it is + * therefore safe, even in the presence of data races, and marking it with + * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that + * it may still be advisable to do so, since we cannot reason about all compiler + * optimizations when it comes to bit manipulations (on the reader and writer + * side). If you are sure nothing can go wrong, we can write the above simply + * as: + * + * .. code-block:: c + * + * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK); + * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT; + * + * Another example, where this may be used, is when certain bits of @var may + * only be modified when holding the appropriate lock, but other bits may still + * be modified concurrently. Writers, where other bits may change concurrently, + * could use the assertion as follows: + * + * .. code-block:: c + * + * spin_lock(&foo_lock); + * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK); + * old_flags = flags; + * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT); + * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... } + * spin_unlock(&foo_lock); + * + * @var: variable to assert on + * @mask: only check for modifications to bits set in @mask + */ +#define ASSERT_EXCLUSIVE_BITS(var, mask) \ + do { \ + kcsan_set_access_mask(mask); \ + __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\ + kcsan_set_access_mask(0); \ + kcsan_atomic_next(1); \ + } while (0) + +#endif /* _LINUX_KCSAN_CHECKS_H */ diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h new file mode 100644 index 000000000000..53340d8789f9 --- /dev/null +++ b/include/linux/kcsan.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_KCSAN_H +#define _LINUX_KCSAN_H + +#include <linux/kcsan-checks.h> +#include <linux/types.h> + +#ifdef CONFIG_KCSAN + +/* + * Context for each thread of execution: for tasks, this is stored in + * task_struct, and interrupts access internal per-CPU storage. + */ +struct kcsan_ctx { + int disable_count; /* disable counter */ + int atomic_next; /* number of following atomic ops */ + + /* + * We distinguish between: (a) nestable atomic regions that may contain + * other nestable regions; and (b) flat atomic regions that do not keep + * track of nesting. Both (a) and (b) are entirely independent of each + * other, and a flat region may be started in a nestable region or + * vice-versa. 
+ * + * This is required because, for example, in the annotations for + * seqlocks, we declare seqlock writer critical sections as (a) nestable + * atomic regions, but reader critical sections as (b) flat atomic + * regions, but have encountered cases where seqlock reader critical + * sections are contained within writer critical sections (the opposite + * may be possible, too). + * + * To support these cases, we independently track the depth of nesting + * for (a), and whether the leaf level is flat for (b). + */ + int atomic_nest_count; + bool in_flat_atomic; + + /* + * Access mask for all accesses if non-zero. + */ + unsigned long access_mask; + + /* List of scoped accesses. */ + struct list_head scoped_accesses; +}; + +/** + * kcsan_init - initialize KCSAN runtime + */ +void kcsan_init(void); + +#else /* CONFIG_KCSAN */ + +static inline void kcsan_init(void) { } + +#endif /* CONFIG_KCSAN */ + +#endif /* _LINUX_KCSAN_H */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 1776eb2e43a4..ea67910ae6b7 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -208,7 +208,7 @@ struct crash_mem_range { struct crash_mem { unsigned int max_nr_ranges; unsigned int nr_ranges; - struct crash_mem_range ranges[0]; + struct crash_mem_range ranges[]; }; extern int crash_exclude_mem_range(struct crash_mem *mem, diff --git a/include/linux/key.h b/include/linux/key.h index 6cf8e71cf8b7..0f2e24f13c2b 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -71,6 +71,23 @@ struct net; #define KEY_PERM_UNDEF 0xffffffff +/* + * The permissions required on a key that we're looking up. + */ +enum key_need_perm { + KEY_NEED_UNSPECIFIED, /* Needed permission unspecified */ + KEY_NEED_VIEW, /* Require permission to view attributes */ + KEY_NEED_READ, /* Require permission to read content */ + KEY_NEED_WRITE, /* Require permission to update / modify */ + KEY_NEED_SEARCH, /* Require permission to search (keyring) or find (key) */ + KEY_NEED_LINK, /* Require permission to link */ + KEY_NEED_SETATTR, /* Require permission to change attributes */ + KEY_NEED_UNLINK, /* Require permission to unlink key */ + KEY_SYSADMIN_OVERRIDE, /* Special: override by CAP_SYS_ADMIN */ + KEY_AUTHTOKEN_OVERRIDE, /* Special: override by possession of auth token */ + KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */ +}; + struct seq_file; struct user_struct; struct signal_struct; @@ -176,6 +193,9 @@ struct key { struct list_head graveyard_link; struct rb_node serial_node; }; +#ifdef CONFIG_KEY_NOTIFICATIONS + struct watch_list *watchers; /* Entities watching this key for changes */ +#endif struct rw_semaphore sem; /* change vs change sem */ struct key_user *user; /* owner of this key */ void *security; /* security data for this key */ @@ -417,20 +437,9 @@ static inline key_serial_t key_serial(const struct key *key) extern void key_set_timeout(struct key *, unsigned); extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags, - key_perm_t perm); + enum key_need_perm need_perm); extern void key_free_user_ns(struct user_namespace *); -/* - * The permissions required on a key that we're looking up. 
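With the bitmask constants replaced by the enum above, a converted lookup_user_key() caller might look like this (kernel context assumed; the function body is an illustrative sketch, not code from this patch):

static long read_key_sketch(key_serial_t id)
{
	key_ref_t kref;

	kref = lookup_user_key(id, 0, KEY_NEED_READ);
	if (IS_ERR(kref))
		return PTR_ERR(kref);
	/* ... access the key via key_ref_to_ptr(kref) ... */
	key_ref_put(kref);
	return 0;
}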
- */ -#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */ -#define KEY_NEED_READ 0x02 /* Require permission to read content */ -#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */ -#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */ -#define KEY_NEED_LINK 0x10 /* Require permission to link */ -#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ -#define KEY_NEED_ALL 0x3f /* All the above permissions */ - static inline short key_read_state(const struct key *key) { /* Barrier versus mark_key_instantiated(). */ diff --git a/include/linux/kobject.h b/include/linux/kobject.h index fc8d83e91379..6cba088bee24 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -29,7 +29,7 @@ #include <linux/uidgid.h> #define UEVENT_HELPER_PATH_LEN 256 -#define UEVENT_NUM_ENVP 32 /* number of env pointers */ +#define UEVENT_NUM_ENVP 64 /* number of env pointers */ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ #ifdef CONFIG_UEVENT_HELPER diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 594265bfd390..e210af75ee38 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -161,7 +161,7 @@ struct kretprobe_instance { kprobe_opcode_t *ret_addr; struct task_struct *task; void *fp; - char data[0]; + char data[]; }; struct kretprobe_blackpoint { diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 8bbcaad7ef0f..65b81e0c494d 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -5,6 +5,8 @@ #include <linux/err.h> #include <linux/sched.h> +struct mm_struct; + __printf(4, 5) struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), void *data, @@ -57,6 +59,7 @@ bool kthread_should_stop(void); bool kthread_should_park(void); bool __kthread_should_park(struct task_struct *k); bool kthread_freezable_should_stop(bool *was_frozen); +void *kthread_func(struct task_struct *k); void *kthread_data(struct task_struct *k); void *kthread_probe_data(struct task_struct *k); int kthread_park(struct task_struct *k); @@ -198,6 +201,9 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); void kthread_destroy_worker(struct kthread_worker *worker); +void kthread_use_mm(struct mm_struct *mm); +void kthread_unuse_mm(struct mm_struct *mm); + struct cgroup_subsys_state; #ifdef CONFIG_BLK_CGROUP diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f43b59b1294c..d564855243d8 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -206,6 +206,7 @@ struct kvm_async_pf { unsigned long addr; struct kvm_arch_async_pf arch; bool wakeup_all; + bool notpresent_injected; }; void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); @@ -318,7 +319,6 @@ struct kvm_vcpu { bool preempted; bool ready; struct kvm_vcpu_arch arch; - struct dentry *debugfs_dentry; }; static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) @@ -409,7 +409,7 @@ struct kvm_irq_routing_table { * Array indexed by gsi. Each entry contains list of irq chips * the gsi is connected to. 
*/ - struct hlist_head map[0]; + struct hlist_head map[]; }; #endif @@ -888,7 +888,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS -void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); +void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); #endif int kvm_arch_hardware_enable(void); @@ -1421,8 +1421,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, } #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ -int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - unsigned long start, unsigned long end, bool blockable); +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end); #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); diff --git a/include/linux/libata.h b/include/linux/libata.h index af832852e620..8bf5e59a7859 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -609,7 +609,7 @@ struct ata_host { struct task_struct *eh_owner; struct ata_port *simplex_claimed; /* channel owning the DMA */ - struct ata_port *ports[0]; + struct ata_port *ports[]; }; struct ata_queued_cmd { diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 99d629fd9944..28f23b341c1c 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -75,6 +75,7 @@ struct common_audit_data { #define LSM_AUDIT_DATA_IBPKEY 13 #define LSM_AUDIT_DATA_IBENDPORT 14 #define LSM_AUDIT_DATA_LOCKDOWN 15 +#define LSM_AUDIT_DATA_NOTIFICATION 16 union { struct path path; struct dentry *dentry; diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index fb3ce6cec997..6791813cd439 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -191,6 +191,8 @@ LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf, loff_t size, enum kernel_read_file_id id) LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old, int flags) +LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred * old, + int flags) LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid) LSM_HOOK(int, 0, task_getpgid, struct task_struct *p) LSM_HOOK(int, 0, task_getsid, struct task_struct *p) @@ -254,6 +256,15 @@ LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen) LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx, u32 *ctxlen) +#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) +LSM_HOOK(int, 0, post_notification, const struct cred *w_cred, + const struct cred *cred, struct watch_notification *n) +#endif /* CONFIG_SECURITY && CONFIG_WATCH_QUEUE */ + +#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS) +LSM_HOOK(int, 0, watch_key, struct key *key) +#endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */ + #ifdef CONFIG_SECURITY_NETWORK LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other, struct sock *newsk) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 3e62dab77699..95b7c1d32062 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -659,6 +659,15 @@ * @old is the set of credentials that are being replaces * @flags contains one of the LSM_SETID_* values. * Return 0 on success. + * @task_fix_setgid: + * Update the module's state after setting one or more of the group + * identity attributes of the current process. 
The @flags parameter + * indicates which of the set*gid system calls invoked this hook. + * @new is the set of credentials that will be installed. Modifications + * should be made to this rather than to @current->cred. + * @old is the set of credentials that are being replaced. + * @flags contains one of the LSM_SETID_* values. + * Return 0 on success. * @task_setpgid: * Check permission before setting the process group identifier of the * process @p to @pgid. @@ -1445,6 +1454,20 @@ * @ctx is a pointer in which to place the allocated security context. * @ctxlen points to the place to put the length of @ctx. * + * Security hooks for the general notification queue: + * + * @post_notification: + * Check to see if a watch notification can be posted to a particular + * queue. + * @w_cred: The credentials of the whoever set the watch. + * @cred: The event-triggerer's credentials + * @n: The notification being posted + * + * @watch_key: + * Check to see if a process is allowed to watch for event notifications + * from a key or keyring. + * @key: The key to watch. + * * Security hooks for using the eBPF maps and programs functionalities through * eBPF syscalls. * diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index fee7fab5d706..375515803cd8 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -318,6 +318,7 @@ extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern int remove_memory(int nid, u64 start, u64 size); extern void __remove_memory(int nid, u64 start, u64 size); +extern int offline_and_remove_memory(int nid, u64 start, u64 size); #else static inline void try_offline_node(int nid) {} diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 8165278c348a..ea9c15b60a96 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -31,7 +31,7 @@ struct mm_struct; * Locking policy for interlave: * In process context there is no locking because only the process accesses * its own state. All vma manipulation is somewhat protected by a down_read on - * mmap_sem. + * mmap_lock. * * Freeing policy: * Mempolicy objects are reference counted. A mempolicy will be freed when diff --git a/include/linux/mm.h b/include/linux/mm.h index 9d6042178ca7..dc7b87310c10 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -15,6 +15,7 @@ #include <linux/atomic.h> #include <linux/debug_locks.h> #include <linux/mm_types.h> +#include <linux/mmap_lock.h> #include <linux/range.h> #include <linux/pfn.h> #include <linux/percpu-refcount.h> @@ -28,6 +29,7 @@ #include <linux/overflow.h> #include <linux/sizes.h> #include <linux/sched.h> +#include <linux/pgtable.h> struct mempolicy; struct anon_vma; @@ -92,7 +94,6 @@ extern int mmap_rnd_compat_bits __read_mostly; #endif #include <asm/page.h> -#include <asm/pgtable.h> #include <asm/processor.h> /* @@ -401,7 +402,7 @@ extern pgprot_t protection_map[16]; * @FAULT_FLAG_WRITE: Fault was a write fault. * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE. * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked. - * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_sem and wait when retrying. + * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying. * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region. * @FAULT_FLAG_TRIED: The fault has been tried once. * @FAULT_FLAG_USER: The fault originated in userspace. 
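The mmap_sem-to-mmap_lock rename lands together with a wrapper API (see mmap_lock.h further below), so a typical caller converts from down_read(&mm->mmap_sem)/up_read(&mm->mmap_sem) to the wrappers. A sketch (kernel context assumed; the helper is hypothetical):

static unsigned long vma_start_sketch(struct mm_struct *mm,
				      unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start = 0;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (vma)
		start = vma->vm_start;	/* vma only valid under the lock */
	mmap_read_unlock(mm);
	return start;
}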
@@ -451,10 +452,10 @@ extern pgprot_t protection_map[16]; * fault_flag_allow_retry_first - check ALLOW_RETRY the first time * * This is mostly used for places where we want to try to avoid taking - * the mmap_sem for too long a time when waiting for another condition + * the mmap_lock for too long a time when waiting for another condition * to change, in which case we can try to be polite to release the - * mmap_sem in the first round to avoid potential starvation of other - * processes that would also want the mmap_sem. + * mmap_lock in the first round to avoid potential starvation of other + * processes that would also want the mmap_lock. * * Return: true if the page fault allows retry and this is the first * attempt of the fault handling; false otherwise. @@ -581,7 +582,7 @@ struct vm_operations_struct { * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure * in mm/mempolicy.c will do this automatically. * get_policy() must NOT add a ref if the policy at (vma,addr) is not - * marked as MPOL_SHARED. vma policies are protected by the mmap_sem. + * marked as MPOL_SHARED. vma policies are protected by the mmap_lock. * If no [shared/vma] mempolicy exists at the addr, get_policy() op * must return NULL--i.e., do not "fallback" to task or system default * policy. diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ef6d3aface8a..64ede5f150dc 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -344,7 +344,7 @@ struct vm_area_struct { * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack * or brk vma (with NULL file) can only be in an anon_vma list. */ - struct list_head anon_vma_chain; /* Serialized by mmap_sem & + struct list_head anon_vma_chain; /* Serialized by mmap_lock & * page_table_lock */ struct anon_vma *anon_vma; /* Serialized by page_table_lock */ @@ -440,7 +440,7 @@ struct mm_struct { spinlock_t page_table_lock; /* Protects page tables and some * counters */ - struct rw_semaphore mmap_sem; + struct rw_semaphore mmap_lock; struct list_head mmlist; /* List of maybe swapped mm's. 
These * are globally strung together off diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h new file mode 100644 index 000000000000..0707671851a8 --- /dev/null +++ b/include/linux/mmap_lock.h @@ -0,0 +1,90 @@ +#ifndef _LINUX_MMAP_LOCK_H +#define _LINUX_MMAP_LOCK_H + +#include <linux/mmdebug.h> + +#define MMAP_LOCK_INITIALIZER(name) \ + .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock), + +static inline void mmap_init_lock(struct mm_struct *mm) +{ + init_rwsem(&mm->mmap_lock); +} + +static inline void mmap_write_lock(struct mm_struct *mm) +{ + down_write(&mm->mmap_lock); +} + +static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass) +{ + down_write_nested(&mm->mmap_lock, subclass); +} + +static inline int mmap_write_lock_killable(struct mm_struct *mm) +{ + return down_write_killable(&mm->mmap_lock); +} + +static inline bool mmap_write_trylock(struct mm_struct *mm) +{ + return down_write_trylock(&mm->mmap_lock) != 0; +} + +static inline void mmap_write_unlock(struct mm_struct *mm) +{ + up_write(&mm->mmap_lock); +} + +static inline void mmap_write_downgrade(struct mm_struct *mm) +{ + downgrade_write(&mm->mmap_lock); +} + +static inline void mmap_read_lock(struct mm_struct *mm) +{ + down_read(&mm->mmap_lock); +} + +static inline int mmap_read_lock_killable(struct mm_struct *mm) +{ + return down_read_killable(&mm->mmap_lock); +} + +static inline bool mmap_read_trylock(struct mm_struct *mm) +{ + return down_read_trylock(&mm->mmap_lock) != 0; +} + +static inline void mmap_read_unlock(struct mm_struct *mm) +{ + up_read(&mm->mmap_lock); +} + +static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm) +{ + if (down_read_trylock(&mm->mmap_lock)) { + rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_); + return true; + } + return false; +} + +static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) +{ + up_read_non_owner(&mm->mmap_lock); +} + +static inline void mmap_assert_locked(struct mm_struct *mm) +{ + lockdep_assert_held(&mm->mmap_lock); + VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); +} + +static inline void mmap_assert_write_locked(struct mm_struct *mm) +{ + lockdep_assert_held_write(&mm->mmap_lock); + VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); +} + +#endif /* _LINUX_MMAP_LOCK_H */ diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index d9a543a9e1cc..c51a84132d7c 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h @@ -4,11 +4,6 @@ #include <asm/mmu_context.h> -struct mm_struct; - -void use_mm(struct mm_struct *mm); -void unuse_mm(struct mm_struct *mm); - /* Architectures that care about IRQ state in switch_mm can override this. */ #ifndef switch_mm_irqs_off # define switch_mm_irqs_off switch_mm diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 736f6918335e..fc68f3570e19 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -5,6 +5,7 @@ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/mm_types.h> +#include <linux/mmap_lock.h> #include <linux/srcu.h> #include <linux/interval_tree.h> @@ -121,7 +122,7 @@ struct mmu_notifier_ops { /* * invalidate_range_start() and invalidate_range_end() must be - * paired and are called only when the mmap_sem and/or the + * paired and are called only when the mmap_lock and/or the * locks protecting the reverse maps are held. 
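The mmap_lock.h wrappers introduced above exist so that callers stop open-coding rwsem operations on the renamed field; a sketch of the intended conversion (function name hypothetical):

	static void example_scan_mm(struct mm_struct *mm)
	{
		mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem); */
		mmap_assert_locked(mm);	/* lockdep plus VM_BUG_ON_MM check */
		/* ... inspect VMAs under the lock ... */
		mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem); */
	}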
If the subsystem
* can't guarantee that no additional references are taken to
* the pages in the range, it has to implement the
@@ -212,13 +213,13 @@
};
/*
- * The notifier chains are protected by mmap_sem and/or the reverse map
+ * The notifier chains are protected by mmap_lock and/or the reverse map
* semaphores. Notifier chains are only changed when all reverse maps and
- * the mmap_sem locks are taken.
+ * the mmap_lock locks are taken.
*
* Therefore notifier chains can only be traversed when either
*
- * 1. mmap_sem is held.
+ * 1. mmap_lock is held.
* 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
* 3. No other concurrent thread can access the list (release)
*/
@@ -277,9 +278,9 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
	ret = mmu_notifier_get_locked(ops, mm);
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
	return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index df1f08486d81..c4c37fd12104 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -660,9 +660,21 @@ struct deferred_split {
* per-zone basis.
*/
typedef struct pglist_data {
+	/*
+	 * node_zones contains just the zones for THIS node. Not all of the
+	 * zones may be populated, but it is the full list. It is referenced by
+	 * this node's node_zonelists as well as other nodes' node_zonelists.
+	 */
	struct zone node_zones[MAX_NR_ZONES];
+
+	/*
+	 * node_zonelists contains references to all zones in all nodes.
+	 * Generally the first zones will be references to this node's
+	 * node_zones.
+	 */
	struct zonelist node_zonelists[MAX_ZONELISTS];
-	int nr_zones;
+
+	int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 7edac8c7a9c1..de657bd211fa 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -111,4 +111,6 @@ extern unsigned int sysctl_mount_max;
extern bool path_is_mountpoint(const struct path *path);
+extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 886e30441c90..d890805f5494 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -98,7 +98,7 @@ struct nand_bbt_descr {
/*
* Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
- * was allocated dynamicaly and must be freed in nand_release(). Has no meaning
+ * was allocated dynamically and must be freed in nand_cleanup(). Has no meaning
* in nand_chip.bbt_options.
*/
#define NAND_BBT_DYNAMICSTRUCT	0x80000000
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index c98a21108688..fd1ecb821106 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -138,7 +138,7 @@ struct cfi_ident {
	uint16_t InterfaceDesc;
	uint16_t MaxBufWriteSize;
	uint8_t NumEraseRegions;
-	uint32_t EraseRegionInfo[0]; /* Not host ordered */
+	uint32_t EraseRegionInfo[]; /* Not host ordered */
} __packed;
/* Extended Query Structure for both PRI and ALT */
@@ -165,7 +165,7 @@ struct cfi_pri_intelext {
	uint16_t ProtRegAddr;
	uint8_t FactProtRegSize;
	uint8_t UserProtRegSize;
-	uint8_t extra[0];
+	uint8_t extra[];
} __packed;
struct cfi_intelext_otpinfo {
@@ -286,7 +286,7 @@ struct cfi_private {
	map_word sector_erase_cmd;
	unsigned long chipshift; /* Because they're of the same type */
	const char *im_name; /* inter_module name for cmdset_setup */
-	struct flchip chips[0]; /* per-chip data structure for each chip */
+	struct flchip chips[]; /* per-chip data structure for each chip */
};
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 2d1f4a61f4ac..157357ec1441 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -200,6 +200,8 @@ struct mtd_debug_info {
*
* @node: list node used to add an MTD partition to the parent partition list
* @offset: offset of the partition relative to the parent offset
+ * @size: partition size. Should be equal to mtd->size unless
+ *	  MTD_SLC_ON_MLC_EMULATION is set
* @flags: original flags (before the mtdpart logic decided to tweak them based
*	   on flash constraints, like eraseblock/pagesize alignment)
*
@@ -209,6 +211,7 @@ struct mtd_debug_info {
struct mtd_part {
	struct list_head node;
	u64 offset;
+	u64 size;
	u32 flags;
};
@@ -622,7 +625,9 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
{
-	return mtd->erasesize / mtd->writesize;
+	struct mtd_info *master = mtd_get_master(mtd);
+
+	return master->erasesize / mtd->writesize;
}
static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index e545c050d3e8..b74a539ec581 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -37,6 +37,7 @@
* master MTD flag set for the corresponding MTD partition.
* For example, to force a read-only partition, simply adding
* MTD_WRITEABLE to the mask_flags will do the trick.
+ * add_flags: contains flags to add to the parent flags
*
* Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
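A hedged sketch of a partition table using the new add_flags field; the name and size are invented, and MTD_SLC_ON_MLC_EMULATION is the flag the @size comment above refers to:

	static const struct mtd_partition example_parts[] = {
		{
			.name      = "slc-boot",
			.offset    = 0,
			.size      = SZ_8M,
			/* force SLC emulation on this region of an MLC chip */
			.add_flags = MTD_SLC_ON_MLC_EMULATION,
		},
	};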
@@ -48,6 +49,7 @@ struct mtd_partition { uint64_t size; /* partition size */ uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ + uint32_t add_flags; /* flags to add to the partition */ struct device_node *of_node; }; diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h index df5b9fddea16..2e3f43788d48 100644 --- a/include/linux/mtd/qinfo.h +++ b/include/linux/mtd/qinfo.h @@ -24,7 +24,7 @@ struct lpddr_private { struct qinfo_chip *qinfo; int numchips; unsigned long chipshift; - struct flchip chips[0]; + struct flchip chips[]; }; /* qinfo_query_info structure contains request information for diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 1e76196f9829..65b1c1c18b41 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -83,14 +83,14 @@ struct nand_chip; /* * Constants for ECC_MODES */ -typedef enum { +enum nand_ecc_mode { + NAND_ECC_INVALID, NAND_ECC_NONE, NAND_ECC_SOFT, NAND_ECC_HW, NAND_ECC_HW_SYNDROME, - NAND_ECC_HW_OOB_FIRST, NAND_ECC_ON_DIE, -} nand_ecc_modes_t; +}; enum nand_ecc_algo { NAND_ECC_UNKNOWN, @@ -119,85 +119,73 @@ enum nand_ecc_algo { #define NAND_ECC_MAXIMIZE BIT(1) /* + * Option constants for bizarre disfunctionality and real + * features. + */ + +/* Buswidth is 16 bit */ +#define NAND_BUSWIDTH_16 BIT(1) + +/* * When using software implementation of Hamming, we can specify which byte * ordering should be used. */ #define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2) -/* - * Option constants for bizarre disfunctionality and real - * features. - */ -/* Buswidth is 16 bit */ -#define NAND_BUSWIDTH_16 0x00000002 /* Chip has cache program function */ -#define NAND_CACHEPRG 0x00000008 +#define NAND_CACHEPRG BIT(3) +/* Options valid for Samsung large page devices */ +#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG + /* * Chip requires ready check on read (for auto-incremented sequential read). * True only for small page devices; large page devices do not support * autoincrement. */ -#define NAND_NEED_READRDY 0x00000100 +#define NAND_NEED_READRDY BIT(8) /* Chip does not allow subpage writes */ -#define NAND_NO_SUBPAGE_WRITE 0x00000200 +#define NAND_NO_SUBPAGE_WRITE BIT(9) /* Device is one of 'new' xD cards that expose fake nand command set */ -#define NAND_BROKEN_XD 0x00000400 +#define NAND_BROKEN_XD BIT(10) /* Device behaves just like nand, but is readonly */ -#define NAND_ROM 0x00000800 +#define NAND_ROM BIT(11) /* Device supports subpage reads */ -#define NAND_SUBPAGE_READ 0x00001000 +#define NAND_SUBPAGE_READ BIT(12) +/* Macros to identify the above */ +#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) /* * Some MLC NANDs need data scrambling to limit bitflips caused by repeated * patterns. */ -#define NAND_NEED_SCRAMBLING 0x00002000 +#define NAND_NEED_SCRAMBLING BIT(13) /* Device needs 3rd row address cycle */ -#define NAND_ROW_ADDR_3 0x00004000 - -/* Options valid for Samsung large page devices */ -#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG - -/* Macros to identify the above */ -#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) - -/* - * There are different places where the manufacturer stores the factory bad - * block markers. - * - * Position within the block: Each of these pages needs to be checked for a - * bad block marking pattern. 
- */
-#define NAND_BBM_FIRSTPAGE		0x01000000
-#define NAND_BBM_SECONDPAGE		0x02000000
-#define NAND_BBM_LASTPAGE		0x04000000
-
-/* Position within the OOB data of the page */
-#define NAND_BBM_POS_SMALL		5
-#define NAND_BBM_POS_LARGE		0
+#define NAND_ROW_ADDR_3		BIT(14)
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
-#define NAND_SKIP_BBTSCAN	0x00010000
+#define NAND_SKIP_BBTSCAN	BIT(16)
/* Chip may not exist, so silence any errors in scan */
-#define NAND_SCAN_SILENT_NODEV	0x00040000
+#define NAND_SCAN_SILENT_NODEV	BIT(18)
+
/*
* Autodetect nand buswidth with readid/onfi.
* This supposes the driver will configure the hardware in 8 bits mode
* when calling nand_scan_ident, and update its configuration
* before calling nand_scan_tail.
*/
-#define NAND_BUSWIDTH_AUTO      0x00080000
+#define NAND_BUSWIDTH_AUTO	BIT(19)
+
/*
* This option could be defined by controller drivers to protect against
* kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
*/
-#define NAND_USE_BOUNCE_BUFFER	0x00100000
+#define NAND_USES_DMA		BIT(20)
/*
* In case your controller is implementing ->legacy.cmd_ctrl() and is relying
@@ -207,26 +195,49 @@ enum nand_ecc_algo {
* If your controller already takes care of this delay, you don't need to set
* this flag.
*/
-#define NAND_WAIT_TCCS		0x00200000
+#define NAND_WAIT_TCCS		BIT(21)
/*
* Whether the NAND chip is a boot medium. Drivers might use this information
* to select ECC algorithms supported by the boot ROM or similar restrictions.
*/
-#define NAND_IS_BOOT_MEDIUM	0x00400000
+#define NAND_IS_BOOT_MEDIUM	BIT(22)
/*
* Do not try to tweak the timings at runtime. This is needed when the
* controller initializes the timings on itself or when it relies on
* configuration done by the bootloader.
*/
-#define NAND_KEEP_TIMINGS	0x00800000
+#define NAND_KEEP_TIMINGS	BIT(23)
+
+/*
+ * There are different places where the manufacturer stores the factory bad
+ * block markers.
+ *
+ * Position within the block: Each of these pages needs to be checked for a
+ * bad block marking pattern.
+ */
+#define NAND_BBM_FIRSTPAGE	BIT(24)
+#define NAND_BBM_SECONDPAGE	BIT(25)
+#define NAND_BBM_LASTPAGE	BIT(26)
+
+/*
+ * Some controllers with pipelined ECC engines override the BBM marker with
+ * data or ECC bytes, thus making bad block detection through bad block marker
+ * impossible. Let's flag those chips so the core knows it shouldn't check the
+ * BBM and consider all blocks good.
+ */
+#define NAND_NO_BBM_QUIRK	BIT(27)
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK	0x03
#define NAND_CI_CELLTYPE_MSK	0x0C
#define NAND_CI_CELLTYPE_SHIFT	2
+/* Position within the OOB data of the page */
+#define NAND_BBM_POS_SMALL		5
+#define NAND_BBM_POS_LARGE		0
+
/**
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
@@ -351,7 +362,7 @@ static const struct nand_ecc_caps __name = {	\
* @write_oob: function to write chip OOB data
*/
struct nand_ecc_ctrl {
-	nand_ecc_modes_t mode;
+	enum nand_ecc_mode mode;
	enum nand_ecc_algo algo;
	int steps;
	int size;
@@ -491,13 +502,17 @@ enum nand_data_interface_type {
/**
* struct nand_data_interface - NAND interface timing
* @type:	 type of the timing
- * @timings:	 The timing, type according to @type
+ * @timings:	 The timing information
+ * @timings.mode: Timing mode as defined in the specification
* @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
*/
struct nand_data_interface {
	enum nand_data_interface_type type;
-	union {
-		struct nand_sdr_timings sdr;
+	struct nand_timings {
+		unsigned int mode;
+		union {
+			struct nand_sdr_timings sdr;
+		};
	} timings;
};
@@ -694,6 +709,7 @@ struct nand_op_instr {
/**
* struct nand_subop - a sub operation
+ * @cs: the CS line to select for this NAND sub-operation
* @instrs: array of instructions
* @ninstrs: length of the @instrs array
* @first_instr_start_off: offset to start from for the first instruction
@@ -709,6 +725,7 @@
* controller driver.
*/
struct nand_subop {
+	unsigned int cs;
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	unsigned int first_instr_start_off;
@@ -1321,13 +1338,17 @@ int nand_read_oob_std(struct nand_chip *chip, int page);
int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
				  u8 *subfeature_param);
-/* Default read_page_raw implementation */
+/* read_page_raw implementations */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
		       int page);
+int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+				  int oob_required, int page);
-/* Default write_page_raw implementation */
+/* write_page_raw implementations */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
			int oob_required, int page);
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+				   int oob_required, int page);
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1356,7 +1377,7 @@ int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page, const void *buf,
				unsigned int len, bool force_8bit);
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
-		      bool force_8bit);
+		      bool force_8bit, bool check_only);
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit);
@@ -1377,8 +1398,6 @@ void nand_wait_ready(struct nand_chip *chip);
* successful nand_scan().
*/
void nand_cleanup(struct nand_chip *chip);
-/* Unregister the MTD device and calls nand_cleanup() */
-void nand_release(struct nand_chip *chip);
/*
* External helper for controller drivers that have to implement the WAITRDY
@@ -1393,6 +1412,10 @@ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
void nand_select_target(struct nand_chip *chip, unsigned int cs);
void nand_deselect_target(struct nand_chip *chip);
+/* Bitops */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+		       unsigned int src_off, unsigned int nbits);
+
/**
* nand_get_data_buf() - Get the internal page buffer
* @chip: NAND chip object
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 1e2af0ec1f03..60bac2c0ec45 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -20,6 +20,7 @@
*/
/* Flash opcodes. */
+#define SPINOR_OP_WRDI		0x04	/* Write disable */
#define SPINOR_OP_WREN		0x06	/* Write enable */
#define SPINOR_OP_RDSR		0x05	/* Read status register */
#define SPINOR_OP_WRSR		0x01	/* Write status register 1 byte */
@@ -80,7 +81,6 @@
/* Used for SST flashes only. */
#define SPINOR_OP_BP		0x02	/* Byte program */
-#define SPINOR_OP_WRDI		0x04	/* Write disable */
#define SPINOR_OP_AAI_WP	0xad	/* Auto address increment word program */
/* Used for S3AN flashes only */
@@ -302,7 +302,7 @@ struct spi_nor;
* @read:		read data from the SPI NOR.
* @write:		write data to the SPI NOR.
* @erase: erase a sector of the SPI NOR at the offset @offs; if - * not provided by the driver, spi-nor will send the erase + * not provided by the driver, SPI NOR will send the erase * opcode via write_reg(). */ struct spi_nor_controller_ops { @@ -327,16 +327,16 @@ struct spi_nor_manufacturer; struct spi_nor_flash_parameter; /** - * struct spi_nor - Structure for defining a the SPI NOR layer - * @mtd: point to a mtd_info structure + * struct spi_nor - Structure for defining the SPI NOR layer + * @mtd: an mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations - * @dev: point to a spi device, or a spi nor controller device. - * @spimem: point to the spi mem device + * @dev: pointer to an SPI device or an SPI NOR controller device + * @spimem: pointer to the SPI memory device * @bouncebuf: bounce buffer used when the buffer passed by the MTD * layer is not DMA-able * @bouncebuf_size: size of the bounce buffer - * @info: spi-nor part JDEC MFR id and other info - * @manufacturer: spi-nor manufacturer + * @info: SPI NOR part JEDEC MFR ID and other info + * @manufacturer: SPI NOR manufacturer * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -344,17 +344,17 @@ struct spi_nor_flash_parameter; * @read_dummy: the dummy needed by the read operation * @program_opcode: the program opcode * @sst_write_second: used by the SST write operation - * @flags: flag options for the current SPI-NOR (SNOR_F_*) + * @flags: flag options for the current SPI NOR (SNOR_F_*) * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations - * @reg_proto the SPI protocol for read_reg/write_reg/erase operations + * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations * @controller_ops: SPI NOR controller driver specific operations. - * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings. + * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings. * The structure includes legacy flash parameters and * settings that can be overwritten by the spi_nor_fixups * hooks, or dynamically when parsing the SFDP tables. * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes. - * @priv: the private data + * @priv: pointer to the private data */ struct spi_nor { struct mtd_info mtd; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5b364a2e0006..6fc613ed8eae 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1821,8 +1821,6 @@ enum netdev_priv_flags { * for hardware timestamping * @sfp_bus: attached &struct sfp_bus structure. 
* - * @addr_list_lock_key: lockdep class annotating - * net_device->addr_list_lock spinlock * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount * @@ -2125,7 +2123,6 @@ struct net_device { #endif struct phy_device *phydev; struct sfp_bus *sfp_bus; - struct lock_class_key addr_list_lock_key; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; @@ -2217,10 +2214,13 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, static struct lock_class_key qdisc_tx_busylock_key; \ static struct lock_class_key qdisc_running_key; \ static struct lock_class_key qdisc_xmit_lock_key; \ + static struct lock_class_key dev_addr_list_lock_key; \ unsigned int i; \ \ (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ (dev)->qdisc_running_key = &qdisc_running_key; \ + lockdep_set_class(&(dev)->addr_list_lock, \ + &dev_addr_list_lock_key); \ for (i = 0; i < (dev)->num_tx_queues; i++) \ lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ &qdisc_xmit_lock_key); \ @@ -3253,7 +3253,6 @@ static inline void netif_stop_queue(struct net_device *dev) } void netif_tx_stop_all_queues(struct net_device *dev); -void netdev_update_lockdep_key(struct net_device *dev); static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) { @@ -4239,6 +4238,11 @@ static inline void netif_addr_lock(struct net_device *dev) spin_lock(&dev->addr_list_lock); } +static inline void netif_addr_lock_nested(struct net_device *dev) +{ + spin_lock_nested(&dev->addr_list_lock, dev->lower_level); +} + static inline void netif_addr_lock_bh(struct net_device *dev) { spin_lock_bh(&dev->addr_list_lock); diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 82d8fb422092..4dba3c948932 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -38,7 +38,7 @@ struct nfs4_ace { struct nfs4_acl { uint32_t naces; - struct nfs4_ace aces[0]; + struct nfs4_ace aces[]; }; #define NFS4_MAXLABELLEN 2048 @@ -295,7 +295,7 @@ static inline bool seqid_mutating_err(u32 err) case NFS4ERR_NOFILEHANDLE: case NFS4ERR_MOVED: return false; - }; + } return true; } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 73eda45f1cfd..6ee9119acc5d 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -230,6 +230,7 @@ struct nfs4_copy_state { #define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ #define NFS_INO_DATA_INVAL_DEFER \ BIT(13) /* Deferred cache invalidation */ +#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index e5f3e7d8d3d5..5fd0a9ef425f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1227,7 +1227,7 @@ struct nfs4_secinfo4 { struct nfs4_secinfo_flavors { unsigned int num_flavors; - struct nfs4_secinfo4 flavors[0]; + struct nfs4_secinfo4 flavors[]; }; struct nfs4_secinfo_arg { diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 222f6f7b2bb3..6be1aa559b1e 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -777,6 +777,16 @@ PAGE_TYPE_OPS(Buddy, buddy) * not onlined when onlining the section). * The content of these pages is effectively stale. Such pages should not * be touched (read/write/dump/save) except by their owner. 
+ *
+ * If a driver wants to allow offlining of unmovable PageOffline() pages without
+ * putting them back to the buddy, it can do so via the memory notifier by
+ * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
+ * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
+ * pages (now with a reference count of zero) are treated like free pages,
+ * allowing the containing memory block to get offlined. A driver that
+ * relies on this feature is aware that re-onlining the memory block will
+ * require the pages to be set PageOffline() again, rather than handed to the
+ * buddy via online_page_callback_t.
*/
PAGE_TYPE_OPS(Offline, offline)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 8e085713150c..cf2468da68e9 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -538,7 +538,7 @@ static inline int lock_page_killable(struct page *page)
* lock_page_or_retry - Lock the page, unless this would block and the
* caller indicated that it can handle a retry.
*
- * Return value and mmap_sem implications depend on flags; see
+ * Return value and mmap_lock implications depend on flags; see
* __lock_page_or_retry().
*/
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 9a57e6717e5c..0ad57693f392 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -550,6 +550,7 @@
#define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
+#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
#define PCI_DEVICE_ID_AMD_19H_DF_F3	0x1653
#define PCI_DEVICE_ID_AMD_CNB17H_F3	0x1703
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
new file mode 100644
index 000000000000..32b6c52d41b9
--- /dev/null
+++ b/include/linux/pgtable.h
@@ -0,0 +1,1438 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PGTABLE_H
+#define _LINUX_PGTABLE_H
+
+#include <linux/pfn.h>
+#include <asm/pgtable.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
+
+#include <linux/mm_types.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <asm-generic/pgtable_uffd.h>
+
+#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
+	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
+#endif
+
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING	0UL
+#endif
+
+/*
+ * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
+ *
+ * The pXx_index() functions return the index of the entry in the page
+ * table page which would control the given virtual address.
+ *
+ * As these functions may be used by the same code for different levels of
+ * the page table folding, they are always available, regardless of
+ * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
+ * because in such cases PTRS_PER_PxD equals 1.
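A sketch of the level-by-level descent that these index and offset helpers standardize; error checking is omitted, and the helpers themselves are defined just below:

	static pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);	/* pgd_index() */
		p4d_t *p4d = p4d_offset(pgd, addr);
		pud_t *pud = pud_offset(p4d, addr);	/* pud_index() */
		pmd_t *pmd = pmd_offset(pud, addr);	/* pmd_index() */

		return pte_offset_kernel(pmd, addr);	/* pte_index() */
	}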
+ */ + +static inline unsigned long pte_index(unsigned long address) +{ + return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); +} + +#ifndef pmd_index +static inline unsigned long pmd_index(unsigned long address) +{ + return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); +} +#define pmd_index pmd_index +#endif + +#ifndef pud_index +static inline unsigned long pud_index(unsigned long address) +{ + return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); +} +#define pud_index pud_index +#endif + +#ifndef pgd_index +/* Must be a compile-time constant, so implement it as a macro */ +#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) +#endif + +#ifndef pte_offset_kernel +static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) +{ + return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); +} +#define pte_offset_kernel pte_offset_kernel +#endif + +#if defined(CONFIG_HIGHPTE) +#define pte_offset_map(dir, address) \ + ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \ + pte_index((address))) +#define pte_unmap(pte) kunmap_atomic((pte)) +#else +#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) +#define pte_unmap(pte) ((void)(pte)) /* NOP */ +#endif + +/* Find an entry in the second-level page table.. */ +#ifndef pmd_offset +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) +{ + return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); +} +#define pmd_offset pmd_offset +#endif + +#ifndef pud_offset +static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) +{ + return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address); +} +#define pud_offset pud_offset +#endif + +static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address) +{ + return (pgd + pgd_index(address)); +}; + +/* + * a shortcut to get a pgd_t in a given mm + */ +#ifndef pgd_offset +#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address)) +#endif + +/* + * a shortcut which implies the use of the kernel's pgd, instead + * of a process's + */ +#define pgd_offset_k(address) pgd_offset(&init_mm, (address)) + +/* + * In many cases it is known that a virtual address is mapped at PMD or PTE + * level, so instead of traversing all the page table levels, we can get a + * pointer to the PMD entry in user or kernel page table or translate a virtual + * address to the pointer in the PTE in the kernel page tables with simple + * helpers. + */ +static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va) +{ + return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va); +} + +static inline pmd_t *pmd_off_k(unsigned long va) +{ + return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va); +} + +static inline pte_t *virt_to_kpte(unsigned long vaddr) +{ + pmd_t *pmd = pmd_off_k(vaddr); + + return pmd_none(*pmd) ? 
NULL : pte_offset_kernel(pmd, vaddr);
+}
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pte_t *ptep,
+				 pte_t entry, int dirty);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
+extern int pudp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pud_t *pudp,
+				 pud_t entry, int dirty);
+#else
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+					unsigned long address, pmd_t *pmdp,
+					pmd_t entry, int dirty)
+{
+	BUILD_BUG();
+	return 0;
+}
+static inline int pudp_set_access_flags(struct vm_area_struct *vma,
+					unsigned long address, pud_t *pudp,
+					pud_t entry, int dirty)
+{
+	BUILD_BUG();
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pte_t *ptep)
+{
+	pte_t pte = *ptep;
+	int r = 1;
+	if (!pte_young(pte))
+		r = 0;
+	else
+		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
+	return r;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	int r = 1;
+	if (!pmd_young(pmd))
+		r = 0;
+	else
+		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
+	return r;
+}
+#else
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	BUILD_BUG();
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmdp);
+#else
+/*
+ * Despite being relevant only to THP, this API is called from generic rmap
+ * code under PageTransHuge(), hence needs a dummy implementation for !THP
+ */
+static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
+					 unsigned long address, pmd_t *pmdp)
+{
+	BUILD_BUG();
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pte_t *ptep)
+{
+	pte_t pte = *ptep;
+	pte_clear(mm, address, ptep);
+	return pte;
+}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	pmd_clear(pmdp);
+	return pmd;
+}
+#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
+static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
+					    unsigned long address,
+					    pud_t *pudp)
+{
+	pud_t pud = *pudp;
+
+	pud_clear(pudp);
+	return pud;
+}
+#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
+					    unsigned long address, pmd_t *pmdp,
+					    int full)
+{
+	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
+static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long address, pud_t *pudp,
+					    int full)
+{
+	return pudp_huge_get_and_clear(mm, address, pudp);
+}
+#endif
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long address, pte_t *ptep,
+					    int full)
+{
+	pte_t pte;
+	pte = ptep_get_and_clear(mm, address, ptep);
+	return pte;
+}
+#endif
+
+
+/*
+ * If two threads concurrently fault at the same page, the thread that
+ * won the race updates the PTE and its local TLB/Cache. The other thread
+ * gives up, simply does nothing, and continues; on architectures where
+ * software can update the TLB, the local TLB can be updated here to avoid
+ * the next page fault. This function updates the TLB only and does nothing
+ * with the cache or anything else; that is what distinguishes it from
+ * update_mmu_cache().
+ */
+#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
+static inline void update_mmu_tlb(struct vm_area_struct *vma,
+				  unsigned long address, pte_t *ptep)
+{
+}
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#endif
+
+/*
+ * Some architectures may be able to avoid expensive synchronization
+ * primitives when modifications are made to PTE's which are already
+ * not present, or in the process of an address space destruction.
+ */
+#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
+static inline void pte_clear_not_present_full(struct mm_struct *mm,
+					      unsigned long address,
+					      pte_t *ptep,
+					      int full)
+{
+	pte_clear(mm, address, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pmd_t *pmdp);
+extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pud_t *pudp);
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+struct mm_struct;
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+	pte_t old_pte = *ptep;
+	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+/*
+ * On some architectures the hardware does not set the page access bit when
+ * accessing a memory page; it is the responsibility of software to set this
+ * bit, which incurs an extra page-fault penalty to track the access bit. As
+ * an optimization, the access bit can be set during the page fault flow on
+ * these architectures. To differentiate it from the pte_mkyoung macro, this
+ * macro is used on platforms where software maintains the page access bit.
+ */
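A hedged sketch of the intended fault-path use of pte_sw_mkyoung() (defined next): the freshly built PTE is marked young before it is installed, sparing the opt-in architectures one extra fault. The helper name is invented:

	static void example_install_pte(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					struct page *page)
	{
		pte_t entry = mk_pte(page, vma->vm_page_prot);

		entry = pte_sw_mkyoung(entry);	/* no-op unless the arch opts in */
		set_pte_at(vma->vm_mm, addr, ptep, entry);
	}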
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+	return pte;
+}
+#define pte_sw_mkyoung	pte_sw_mkyoung
+#endif
+
+#ifndef pte_savedwrite
+#define pte_savedwrite pte_write
+#endif
+
+#ifndef pte_mk_savedwrite
+#define pte_mk_savedwrite pte_mkwrite
+#endif
+
+#ifndef pte_clear_savedwrite
+#define pte_clear_savedwrite pte_wrprotect
+#endif
+
+#ifndef pmd_savedwrite
+#define pmd_savedwrite pmd_write
+#endif
+
+#ifndef pmd_mk_savedwrite
+#define pmd_mk_savedwrite pmd_mkwrite
+#endif
+
+#ifndef pmd_clear_savedwrite
+#define pmd_clear_savedwrite pmd_wrprotect
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	pmd_t old_pmd = *pmdp;
+	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
+}
+#else
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	BUILD_BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pud_t *pudp)
+{
+	pud_t old_pud = *pudp;
+
+	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
+}
+#else
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pud_t *pudp)
+{
+	BUILD_BUG();
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif
+
+#ifndef pmdp_collapse_flush
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp);
+#else
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+					unsigned long address,
+					pmd_t *pmdp)
+{
+	BUILD_BUG();
+	return *pmdp;
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+				       pgtable_t pgtable);
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In this case we
+ * can't race with the CPU setting these bits, so a non-atomic approach is fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t old_pmd = *pmdp;
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+	return old_pmd;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+			    pmd_t *pmdp);
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_UNUSED
+/*
+ * Some architectures provide facilities to virtualization guests
+ * so that they can flag allocated pages as unused. This allows the
+ * host to transparently reclaim unused pages. This function returns
+ * whether the pte's page is unused.
+ */ +static inline int pte_unused(pte_t pte) +{ + return 0; +} +#endif + +#ifndef pte_access_permitted +#define pte_access_permitted(pte, write) \ + (pte_present(pte) && (!(write) || pte_write(pte))) +#endif + +#ifndef pmd_access_permitted +#define pmd_access_permitted(pmd, write) \ + (pmd_present(pmd) && (!(write) || pmd_write(pmd))) +#endif + +#ifndef pud_access_permitted +#define pud_access_permitted(pud, write) \ + (pud_present(pud) && (!(write) || pud_write(pud))) +#endif + +#ifndef p4d_access_permitted +#define p4d_access_permitted(p4d, write) \ + (p4d_present(p4d) && (!(write) || p4d_write(p4d))) +#endif + +#ifndef pgd_access_permitted +#define pgd_access_permitted(pgd, write) \ + (pgd_present(pgd) && (!(write) || pgd_write(pgd))) +#endif + +#ifndef __HAVE_ARCH_PMD_SAME +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + return pmd_val(pmd_a) == pmd_val(pmd_b); +} + +static inline int pud_same(pud_t pud_a, pud_t pud_b) +{ + return pud_val(pud_a) == pud_val(pud_b); +} +#endif + +#ifndef __HAVE_ARCH_P4D_SAME +static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b) +{ + return p4d_val(p4d_a) == p4d_val(p4d_b); +} +#endif + +#ifndef __HAVE_ARCH_PGD_SAME +static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) +{ + return pgd_val(pgd_a) == pgd_val(pgd_b); +} +#endif + +/* + * Use set_p*_safe(), and elide TLB flushing, when confident that *no* + * TLB flush will be required as a result of the "set". For example, use + * in scenarios where it is known ahead of time that the routine is + * setting non-present entries, or re-setting an existing entry to the + * same value. Otherwise, use the typical "set" helpers and flush the + * TLB. + */ +#define set_pte_safe(ptep, pte) \ +({ \ + WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \ + set_pte(ptep, pte); \ +}) + +#define set_pmd_safe(pmdp, pmd) \ +({ \ + WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \ + set_pmd(pmdp, pmd); \ +}) + +#define set_pud_safe(pudp, pud) \ +({ \ + WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \ + set_pud(pudp, pud); \ +}) + +#define set_p4d_safe(p4dp, p4d) \ +({ \ + WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \ + set_p4d(p4dp, p4d); \ +}) + +#define set_pgd_safe(pgdp, pgd) \ +({ \ + WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ + set_pgd(pgdp, pgd); \ +}) + +#ifndef __HAVE_ARCH_DO_SWAP_PAGE +/* + * Some architectures support metadata associated with a page. When a + * page is being swapped out, this metadata must be saved so it can be + * restored when the page is swapped back in. SPARC M7 and newer + * processors support an ADI (Application Data Integrity) tag for the + * page as metadata for the page. arch_do_swap_page() can restore this + * metadata when a page is swapped back in. + */ +static inline void arch_do_swap_page(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t pte, pte_t oldpte) +{ + +} +#endif + +#ifndef __HAVE_ARCH_UNMAP_ONE +/* + * Some architectures support metadata associated with a page. When a + * page is being swapped out, this metadata must be saved so it can be + * restored when the page is swapped back in. SPARC M7 and newer + * processors support an ADI (Application Data Integrity) tag for the + * page as metadata for the page. arch_unmap_one() can save this + * metadata on a swap-out of a page. 
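The *_access_permitted() fallbacks defined above all boil down to one test; a sketch of a GUP-style caller (the helper name is hypothetical):

	static bool example_may_follow(pte_t pte, bool write)
	{
		/* present, and writable whenever a write access is requested */
		return pte_access_permitted(pte, write);
	}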
+ */ +static inline int arch_unmap_one(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t orig_pte) +{ + return 0; +} +#endif + +#ifndef __HAVE_ARCH_PGD_OFFSET_GATE +#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) +#endif + +#ifndef __HAVE_ARCH_MOVE_PTE +#define move_pte(pte, prot, old_addr, new_addr) (pte) +#endif + +#ifndef pte_accessible +# define pte_accessible(mm, pte) ((void)(pte), 1) +#endif + +#ifndef flush_tlb_fix_spurious_fault +#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) +#endif + +#ifndef pgprot_nx +#define pgprot_nx(prot) (prot) +#endif + +#ifndef pgprot_noncached +#define pgprot_noncached(prot) (prot) +#endif + +#ifndef pgprot_writecombine +#define pgprot_writecombine pgprot_noncached +#endif + +#ifndef pgprot_writethrough +#define pgprot_writethrough pgprot_noncached +#endif + +#ifndef pgprot_device +#define pgprot_device pgprot_noncached +#endif + +#ifndef pgprot_modify +#define pgprot_modify pgprot_modify +static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +{ + if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) + newprot = pgprot_noncached(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) + newprot = pgprot_writecombine(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) + newprot = pgprot_device(newprot); + return newprot; +} +#endif + +/* + * When walking page tables, get the address of the next boundary, + * or the end address of the range if that comes earlier. Although no + * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout. + */ + +#define pgd_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) + +#ifndef p4d_addr_end +#define p4d_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) +#endif + +#ifndef pud_addr_end +#define pud_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) +#endif + +#ifndef pmd_addr_end +#define pmd_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) +#endif + +/* + * When walking page tables, we usually want to skip any p?d_none entries; + * and any p?d_bad entries - reporting the error before resetting to none. + * Do the tests inline, but report and clear the bad entry in mm/memory.c. 
+ */
+void pgd_clear_bad(pgd_t *);
+
+#ifndef __PAGETABLE_P4D_FOLDED
+void p4d_clear_bad(p4d_t *);
+#else
+#define p4d_clear_bad(p4d)	do { } while (0)
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_clear_bad(pud_t *);
+#else
+#define pud_clear_bad(pud)	do { } while (0)
+#endif
+
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+	if (pgd_none(*pgd))
+		return 1;
+	if (unlikely(pgd_bad(*pgd))) {
+		pgd_clear_bad(pgd);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int p4d_none_or_clear_bad(p4d_t *p4d)
+{
+	if (p4d_none(*p4d))
+		return 1;
+	if (unlikely(p4d_bad(*p4d))) {
+		p4d_clear_bad(p4d);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+	if (pud_none(*pud))
+		return 1;
+	if (unlikely(pud_bad(*pud))) {
+		pud_clear_bad(pud);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+	if (pmd_none(*pmd))
+		return 1;
+	if (unlikely(pmd_bad(*pmd))) {
+		pmd_clear_bad(pmd);
+		return 1;
+	}
+	return 0;
+}
+
+static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
+					     unsigned long addr,
+					     pte_t *ptep)
+{
+	/*
+	 * Get the current pte state, but zero it out to make it
+	 * non-present, preventing the hardware from asynchronously
+	 * updating it.
+	 */
+	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
+					     unsigned long addr,
+					     pte_t *ptep, pte_t pte)
+{
+	/*
+	 * The pte is non-present, so there's no hardware state to
+	 * preserve.
+	 */
+	set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transaction.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time. The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+					   unsigned long addr,
+					   pte_t *ptep)
+{
+	return __ptep_modify_prot_start(vma, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
+					   unsigned long addr,
+					   pte_t *ptep, pte_t old_pte, pte_t pte)
+{
+	__ptep_modify_prot_commit(vma, addr, ptep, pte);
+}
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+#endif /* CONFIG_MMU */
+
+/*
+ * No-op macros that just return the current protection value. Defined here
+ * because these macros can be used even if CONFIG_MMU is not defined.
+ */
+#ifndef pgprot_encrypted
+#define pgprot_encrypted(prot)	(prot)
+#endif
+
+#ifndef pgprot_decrypted
+#define pgprot_decrypted(prot)	(prot)
+#endif
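A sketch of the read-modify-write transaction the ptep_modify_prot_*() API above defines; the caller is assumed to hold the pte lock for the whole sequence:

	static void example_change_protection(struct vm_area_struct *vma,
					      unsigned long addr, pte_t *ptep,
					      pgprot_t newprot)
	{
		pte_t oldpte = ptep_modify_prot_start(vma, addr, ptep);
		pte_t ptent = pte_modify(oldpte, newprot);

		/* must be committed before the pte lock is released */
		ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent);
	}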
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()	do {} while (0)
+#define arch_leave_lazy_mmu_mode()	do {} while (0)
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+#endif
+
+/*
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests. By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exits should always be
+ * paired. This is for sanity of maintaining and reasoning about the
+ * kernel code. In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
+ */
+#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
+#define arch_start_context_switch(prev)	do {} while (0)
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+#endif
+#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
+static inline int pte_soft_dirty(pte_t pte)
+{
+	return 0;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+#endif
+
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interfaces that can be used by architecture code to keep track of
+ * memory type of pfn mappings specified by the remap_pfn_range,
+ * vmf_insert_pfn.
+ */
+
+/*
+ * track_pfn_remap is called when a _new_ pfn mapping is being established
+ * by remap_pfn_range() for physical range indicated by pfn and size.
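These stubs (or their PAT-backed counterparts on x86) are reached from the generic pfn-mapping paths; for instance, a driver's mmap handler ends up in track_pfn_remap() through remap_pfn_range(). A hedged sketch, with the physical base an assumed constant:

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long pfn = EXAMPLE_PHYS_BASE >> PAGE_SHIFT; /* assumed */

		/* remap_pfn_range() does the track_pfn_remap() bookkeeping */
		return remap_pfn_range(vma, vma->vm_start, pfn,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}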
+ */ +static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, + unsigned long size) +{ + return 0; +} + +/* + * track_pfn_insert is called when a _new_ single pfn is established + * by vmf_insert_pfn(). + */ +static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, + pfn_t pfn) +{ +} + +/* + * track_pfn_copy is called when vma that is covering the pfnmap gets + * copied through copy_page_range(). + */ +static inline int track_pfn_copy(struct vm_area_struct *vma) +{ + return 0; +} + +/* + * untrack_pfn is called while unmapping a pfnmap for a region. + * untrack can be called for a specific region indicated by pfn and size or + * can be for the entire vma (in which case pfn, size are zero). + */ +static inline void untrack_pfn(struct vm_area_struct *vma, + unsigned long pfn, unsigned long size) +{ +} + +/* + * untrack_pfn_moved is called while mremapping a pfnmap for a new region. + */ +static inline void untrack_pfn_moved(struct vm_area_struct *vma) +{ +} +#else +extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, + unsigned long size); +extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, + pfn_t pfn); +extern int track_pfn_copy(struct vm_area_struct *vma); +extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, + unsigned long size); +extern void untrack_pfn_moved(struct vm_area_struct *vma); +#endif + +#ifdef __HAVE_COLOR_ZERO_PAGE +static inline int is_zero_pfn(unsigned long pfn) +{ + extern unsigned long zero_pfn; + unsigned long offset_from_zero_pfn = pfn - zero_pfn; + return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); +} + +#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) + +#else +static inline int is_zero_pfn(unsigned long pfn) +{ + extern unsigned long zero_pfn; + return pfn == zero_pfn; +} + +static inline unsigned long my_zero_pfn(unsigned long addr) +{ + extern unsigned long zero_pfn; + return zero_pfn; +} +#endif + +#ifdef CONFIG_MMU + +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_trans_huge(pmd_t pmd) +{ + return 0; +} +#ifndef pmd_write +static inline int pmd_write(pmd_t pmd) +{ + BUG(); + return 0; +} +#endif /* pmd_write */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifndef pud_write +static inline int pud_write(pud_t pud) +{ + BUG(); + return 0; +} +#endif /* pud_write */ + +#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) +static inline int pmd_devmap(pmd_t pmd) +{ + return 0; +} +static inline int pud_devmap(pud_t pud) +{ + return 0; +} +static inline int pgd_devmap(pgd_t pgd) +{ + return 0; +} +#endif + +#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ + (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) +static inline int pud_trans_huge(pud_t pud) +{ + return 0; +} +#endif + +/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */ +static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud) +{ + pud_t pudval = READ_ONCE(*pud); + + if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval)) + return 1; + if (unlikely(pud_bad(pudval))) { + pud_clear_bad(pud); + return 1; + } + return 0; +} + +/* See pmd_trans_unstable for discussion. 
*/ +static inline int pud_trans_unstable(pud_t *pud) +{ +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + return pud_none_or_trans_huge_or_dev_or_clear_bad(pud); +#else + return 0; +#endif +} + +#ifndef pmd_read_atomic +static inline pmd_t pmd_read_atomic(pmd_t *pmdp) +{ + /* + * Depend on compiler for an atomic pmd read. NOTE: this is + * only going to work if the pmdval_t isn't larger than + * an unsigned long. + */ + return *pmdp; +} +#endif + +#ifndef arch_needs_pgtable_deposit +#define arch_needs_pgtable_deposit() (false) +#endif +/* + * This function is meant to be used by sites walking pagetables with + * the mmap_lock held in read mode to protect against MADV_DONTNEED and + * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd + * into a null pmd and the transhuge page fault can convert a null pmd + * into a hugepmd or into a regular pmd (if the hugepage allocation + * fails). While holding the mmap_lock in read mode the pmd becomes + * stable and stops changing under us only if it's not null and not a + * transhuge pmd. When those races occur and this function makes a + * difference vs the standard pmd_none_or_clear_bad, the result is + * undefined, so behaving as if the pmd was none is safe (because it + * can return none anyway). The compiler level barrier() is critically + * important to compute the two checks atomically on the same pmdval. + * + * For 32bit kernels with a 64bit large pmd_t this automatically takes + * care of reading the pmd atomically to avoid SMP race conditions + * against pmd_populate() when the mmap_lock is held for reading by the + * caller (a special atomic read not done by "gcc" as in the generic + * version above, is also needed when THP is disabled because the page + * fault can populate the pmd from under us). + */ +static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) +{ + pmd_t pmdval = pmd_read_atomic(pmd); + /* + * The barrier will stabilize the pmdval in a register or on + * the stack so that it will stop changing under the code. + * + * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE, + * pmd_read_atomic is allowed to return a non-atomic pmdval + * (for example pointing to a hugepage that has never been + * mapped in the pmd). The below checks will only care about + * the low part of the pmd with 32bit PAE x86 anyway, with the + * exception of pmd_none(). So the important thing is that if + * the low part of the pmd is found null, the high part will + * also be null or the pmd_none() check below would be + * confused. + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + barrier(); +#endif + /* + * !pmd_present() checks for pmd migration entries + * + * The complete check uses is_pmd_migration_entry() in linux/swapops.h + * But using that requires moving the current function and pmd_trans_unstable() + * to linux/swapops.h to resolve the dependency, which is too much code move. + * + * !pmd_present() is equivalent to is_pmd_migration_entry() currently, + * because !pmd_present() pages can only be under migration, not swapped + * out. + * + * pmd_none() is preserved for future condition checks on pmd migration + * entries and to avoid confusion with this function's name, although it is + * redundant with !pmd_present().
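+ * + * In other words, a sketch of the complete check this approximates, + * assuming the swapops.h helper were usable here: + * + *	if (is_pmd_migration_entry(pmdval)) + *		return 1;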
+ */ + if (pmd_none(pmdval) || pmd_trans_huge(pmdval) || + (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval))) + return 1; + if (unlikely(pmd_bad(pmdval))) { + pmd_clear_bad(pmd); + return 1; + } + return 0; +} + +/* + * This is a noop if Transparent Hugepage Support is not built into + * the kernel. Otherwise it is equivalent to + * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in + * places that have already verified the pmd is not none and want to + * walk ptes while holding the mmap_lock in read mode (write mode doesn't + * need this). If THP is not enabled, the pmd can't go away under the + * code even if MADV_DONTNEED runs, but if THP is enabled we need to + * run a pmd_trans_unstable before walking the ptes after + * split_huge_pmd returns (because it may have run when the pmd became + * null, but then a page fault can map in a THP and not a regular page). + */ +static inline int pmd_trans_unstable(pmd_t *pmd) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return pmd_none_or_trans_huge_or_clear_bad(pmd); +#else + return 0; +#endif +} + +#ifndef CONFIG_NUMA_BALANCING +/* + * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but + * the only case the kernel cares about is NUMA balancing, and the bit is only + * ever set when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not + * marked _PAGE_PROTNONE, so by default implement the helper as "always no". It + * is the responsibility of the caller to distinguish between PROT_NONE + * protections and NUMA hinting fault protections. + */ +static inline int pte_protnone(pte_t pte) +{ + return 0; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return 0; +} +#endif /* CONFIG_NUMA_BALANCING */ + +#endif /* CONFIG_MMU */ + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#ifndef __PAGETABLE_P4D_FOLDED +int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot); +int p4d_clear_huge(p4d_t *p4d); +#else +static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int p4d_clear_huge(p4d_t *p4d) +{ + return 0; +} +#endif /* !__PAGETABLE_P4D_FOLDED */ + +int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); +int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); +int pud_clear_huge(pud_t *pud); +int pmd_clear_huge(pmd_t *pmd); +int p4d_free_pud_page(p4d_t *p4d, unsigned long addr); +int pud_free_pmd_page(pud_t *pud, unsigned long addr); +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); +#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ +static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int p4d_clear_huge(p4d_t *p4d) +{ + return 0; +} +static inline int pud_clear_huge(pud_t *pud) +{ + return 0; +} +static inline int pmd_clear_huge(pmd_t *pmd) +{ + return 0; +} +static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) +{ + return 0; +} +static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) +{ + return 0; +} +static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) +{ + return 0; +} +#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ + +#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * ARCHes with special requirements for evicting THP backing TLB entries can + * implement this.
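+ * (To override, an architecture defines __HAVE_ARCH_FLUSH_PMD_TLB_RANGE and + * provides its own flush_pmd_tlb_range() with the same (vma, addr, end) + * calling convention as the fallback below.)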
Otherwise it can also help optimize the normal TLB flush in + * the THP regime: stock flush_tlb_range() typically has an optimization to nuke the + * entire TLB if the flush span is greater than a threshold, which will + * likely be true for a single huge page. Thus a single THP flush will + * invalidate the entire TLB, which is not desirable. + * e.g. see arch/arc: flush_pmd_tlb_range + */ +#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#else +#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() +#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG() +#endif +#endif + +struct file; +int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t *vma_prot); + +#ifndef CONFIG_X86_ESPFIX64 +static inline void init_espfix_bsp(void) { } +#endif + +extern void __init pgtable_cache_init(void); + +#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED +static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) +{ + return true; +} + +static inline bool arch_has_pfn_modify_check(void) +{ + return false; +} +#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */ + +/* + * Architecture PAGE_KERNEL_* fallbacks + * + * Some architectures don't define certain PAGE_KERNEL_* flags. This is either + * because they really don't support them, or the port needs to be updated to + * reflect the required functionality. Below are a set of relatively safe + * fallbacks, provided as a best effort, which we can count on until the + * architectures define them on their own. + */ + +#ifndef PAGE_KERNEL_RO +# define PAGE_KERNEL_RO PAGE_KERNEL +#endif + +#ifndef PAGE_KERNEL_EXEC +# define PAGE_KERNEL_EXEC PAGE_KERNEL +#endif + +/* + * Page Table Modification bits for pgtbl_mod_mask. + * + * These are used by the p?d_alloc_track*() set of functions and in the generic + * vmalloc/ioremap code to track at which page-table levels entries have been + * modified. Based on that the code can better decide when vmalloc and ioremap + * mapping changes need to be synchronized to other page-tables in the system. + */ +#define __PGTBL_PGD_MODIFIED 0 +#define __PGTBL_P4D_MODIFIED 1 +#define __PGTBL_PUD_MODIFIED 2 +#define __PGTBL_PMD_MODIFIED 3 +#define __PGTBL_PTE_MODIFIED 4 + +#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED) +#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED) +#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED) +#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED) +#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED) + +/* Page-Table Modification Mask */ +typedef unsigned int pgtbl_mod_mask; + +#endif /* !__ASSEMBLY__ */ + +#ifndef io_remap_pfn_range +#define io_remap_pfn_range remap_pfn_range +#endif + +#ifndef has_transparent_hugepage +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define has_transparent_hugepage() 1 +#else +#define has_transparent_hugepage() 0 +#endif +#endif + +/* + * On some architectures it depends on the mm if the p4d/pud or pmd + * layer of the page table hierarchy is folded or not. + */ +#ifndef mm_p4d_folded +#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED) +#endif + +#ifndef mm_pud_folded +#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED) +#endif + +#ifndef mm_pmd_folded +#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED) +#endif + +/* + * p?d_leaf() - true if this entry is a final mapping to a physical address.
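+ * For example, a sketch of a software page-table walker that stops descending + * at a leaf (note_huge_mapping() is a hypothetical helper): + * + *	if (pmd_leaf(*pmdp)) + *		return note_huge_mapping(pmdp); + *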
+ * These differ from the p?d_huge() helpers in that they are always available (if + * the architecture supports large pages at the appropriate level) even + * if CONFIG_HUGETLB_PAGE is not defined. + * Only meaningful when called on a valid entry. + */ +#ifndef pgd_leaf +#define pgd_leaf(x) 0 +#endif +#ifndef p4d_leaf +#define p4d_leaf(x) 0 +#endif +#ifndef pud_leaf +#define pud_leaf(x) 0 +#endif +#ifndef pmd_leaf +#define pmd_leaf(x) 0 +#endif + +#endif /* _LINUX_PGTABLE_H */ diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 0c31b9461262..50afd0d0084c 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -9,6 +9,10 @@ #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ #define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */ #define PIPE_BUF_FLAG_CAN_MERGE 0x10 /* can merge buffers */ +#define PIPE_BUF_FLAG_WHOLE 0x20 /* read() must return entire buffer or error */ +#ifdef CONFIG_WATCH_QUEUE +#define PIPE_BUF_FLAG_LOSS 0x40 /* Message loss happened after this buffer */ +#endif /** * struct pipe_buffer - a linux kernel pipe buffer @@ -34,8 +38,10 @@ struct pipe_buffer { * @wr_wait: writer wait point in case of full pipe * @head: The point of buffer production * @tail: The point of buffer consumption + * @note_loss: The next read() should insert a data-lost message * @max_usage: The maximum number of slots that may be used in the ring * @ring_size: total number of buffers (should be a power of 2) + * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs * @tmp_page: cached released page * @readers: number of current readers of this pipe * @writers: number of current writers of this pipe @@ -46,6 +52,7 @@ struct pipe_buffer { * @fasync_writers: writer side fasync * @bufs: the circular array of pipe buffers * @user: the user who created this pipe + * @watch_queue: If this pipe is a watch_queue, this is the stuff for that **/ struct pipe_inode_info { struct mutex mutex; @@ -54,6 +61,10 @@ struct pipe_inode_info { unsigned int tail; unsigned int max_usage; unsigned int ring_size; +#ifdef CONFIG_WATCH_QUEUE + bool note_loss; +#endif + unsigned int nr_accounted; unsigned int readers; unsigned int writers; unsigned int files; @@ -64,6 +75,9 @@ struct pipe_inode_info { struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; +#ifdef CONFIG_WATCH_QUEUE + struct watch_queue *watch_queue; +#endif }; /* @@ -239,9 +253,20 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); extern const struct pipe_buf_operations nosteal_pipe_buf_ops; +#ifdef CONFIG_WATCH_QUEUE +unsigned long account_pipe_buffers(struct user_struct *user, + unsigned long old, unsigned long new); +bool too_many_pipe_buffers_soft(unsigned long user_bufs); +bool too_many_pipe_buffers_hard(unsigned long user_bufs); +bool pipe_is_unprivileged_user(void); +#endif + /* for F_SETPIPE_SZ and F_GETPIPE_SZ */ +#ifdef CONFIG_WATCH_QUEUE +int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots); +#endif long pipe_fcntl(struct file *, unsigned int, unsigned long arg); -struct pipe_inode_info *get_pipe_info(struct file *file); +struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice); int create_pipe_files(struct file **, int); unsigned int round_pipe_size(unsigned long size); diff --git a/include/linux/platform_data/i2c-pxa.h b/include/linux/platform_data/i2c-pxa.h index 6a9b28399b39..24953981bd9f 100644 --- a/include/linux/platform_data/i2c-pxa.h +++ b/include/linux/platform_data/i2c-pxa.h @@ -7,54
+7,6 @@ #ifndef _I2C_PXA_H_ #define _I2C_PXA_H_ -#if 0 -#define DEF_TIMEOUT 3 -#else -/* need a longer timeout if we're dealing with the fact we may well be - * looking at a multi-master environment -*/ -#define DEF_TIMEOUT 32 -#endif - -#define BUS_ERROR (-EREMOTEIO) -#define XFER_NAKED (-ECONNREFUSED) -#define I2C_RETRY (-2000) /* an error has occurred retry transmit */ - -/* ICR initialize bit values -* -* 15. FM 0 (100 Khz operation) -* 14. UR 0 (No unit reset) -* 13. SADIE 0 (Disables the unit from interrupting on slave addresses -* matching its slave address) -* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration -* in master mode) -* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode) -* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent) -* 9. IRFIE 1 (Enable interrupts from full buffer received) -* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty) -* 7. GCD 1 (Disables i2c unit response to general call messages as a slave) -* 6. IUE 0 (Disable unit until we change settings) -* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL) -* 4. MA 0 (Only send stop with the ICR stop bit) -* 3. TB 0 (We are not transmitting a byte initially) -* 2. ACKNAK 0 (Send an ACK after the unit receives a byte) -* 1. STOP 0 (Do not send a STOP) -* 0. START 0 (Do not send a START) -* -*/ -#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE) - -/* I2C status register init values - * - * 10. BED 1 (Clear bus error detected) - * 9. SAD 1 (Clear slave address detected) - * 7. IRF 1 (Clear IDBR Receive Full) - * 6. ITE 1 (Clear IDBR Transmit Empty) - * 5. ALD 1 (Clear Arbitration Loss Detected) - * 4. SSD 1 (Clear Slave Stop Detected) - */ -#define I2C_ISR_INIT 0x7FF /* status register init */ - struct i2c_pxa_platform_data { unsigned int class; unsigned int use_pio :1; diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h index 08e639e047e5..03e92c71b3fa 100644 --- a/include/linux/platform_data/mtd-davinci.h +++ b/include/linux/platform_data/mtd-davinci.h @@ -68,7 +68,7 @@ struct davinci_nand_pdata { /* platform_data */ * Newer ones also support 4-bit ECC, but are awkward * using it with large page chips. */ - nand_ecc_modes_t ecc_mode; + enum nand_ecc_mode ecc_mode; u8 ecc_bits; /* e.g. NAND_BUSWIDTH_16 */ diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h index deb849bcf0ec..08675b16f9e1 100644 --- a/include/linux/platform_data/mtd-nand-s3c2410.h +++ b/include/linux/platform_data/mtd-nand-s3c2410.h @@ -49,7 +49,7 @@ struct s3c2410_platform_nand { unsigned int ignore_unset_ecc:1; - nand_ecc_modes_t ecc_mode; + enum nand_ecc_mode ecc_mode; int nr_sets; struct s3c2410_nand_set *sets; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 747861816f4f..d5c4a329321d 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -42,6 +42,18 @@ struct dev_pm_opp_supply { }; /** + * struct dev_pm_opp_icc_bw - Interconnect bandwidth values + * @avg: Average bandwidth corresponding to this OPP (in icc units) + * @peak: Peak bandwidth corresponding to this OPP (in icc units) + * + * This structure stores the bandwidth values for a single interconnect path. 
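+ * + * For example, with illustrative values only (icc units are typically kB/s), + * an OPP needing 100 MB/s average and 200 MB/s peak bandwidth on one path + * would carry: + * + *	struct dev_pm_opp_icc_bw bw = { .avg = 100000, .peak = 200000 };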
+ */ +struct dev_pm_opp_icc_bw { + u32 avg; + u32 peak; +}; + +/** * struct dev_pm_opp_info - OPP freq/voltage/current values * @rate: Target clk rate in hz * @supplies: Array of voltage/current values for all power supplies @@ -360,6 +372,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); int of_get_required_opp_performance_state(struct device_node *np, int index); +int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table); void dev_pm_opp_of_register_em(struct cpumask *cpus); #else static inline int dev_pm_opp_of_add_table(struct device *dev) @@ -408,6 +421,11 @@ static inline int of_get_required_opp_performance_state(struct device_node *np, { return -ENOTSUPP; } + +static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table) +{ + return -ENOTSUPP; +} #endif #endif /* __LINUX_OPP_H__ */ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index dcd5a71e6c67..ac1345a48ad0 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -61,6 +61,7 @@ enum { POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_OVERCURRENT, + POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED, }; enum { @@ -139,6 +140,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CAPACITY, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */ + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_MAX, @@ -158,6 +160,9 @@ enum power_supply_property { POWER_SUPPLY_PROP_PRECHARGE_CURRENT, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, POWER_SUPPLY_PROP_CALIBRATE, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH, + POWER_SUPPLY_PROP_MANUFACTURE_DAY, /* Properties of type `const char *' */ POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, @@ -223,9 +228,9 @@ struct power_supply_config { struct power_supply_desc { const char *name; enum power_supply_type type; - enum power_supply_usb_type *usb_types; + const enum power_supply_usb_type *usb_types; size_t num_usb_types; - enum power_supply_property *properties; + const enum power_supply_property *properties; size_t num_properties; /* @@ -346,8 +351,12 @@ struct power_supply_battery_info { int charge_full_design_uah; /* microAmp-hours */ int voltage_min_design_uv; /* microVolts */ int voltage_max_design_uv; /* microVolts */ + int tricklecharge_current_ua; /* microAmps */ int precharge_current_ua; /* microAmps */ + int precharge_voltage_max_uv; /* microVolts */ int charge_term_current_ua; /* microAmps */ + int charge_restart_voltage_uv; /* microVolts */ + int overvoltage_limit_uv; /* microVolts */ int constant_charge_current_max_ua; /* microAmps */ int constant_charge_voltage_max_uv; /* microVolts */ int factory_internal_resistance_uohm; /* microOhms */ diff --git a/include/linux/ras.h b/include/linux/ras.h index 7c3debb47c87..1f4048bf2674 100644 --- a/include/linux/ras.h +++ b/include/linux/ras.h @@ -17,12 +17,7 @@ static inline int ras_add_daemon_trace(void) { return 0; } #endif #ifdef CONFIG_RAS_CEC -void __init cec_init(void); int __init parse_cec_param(char *str); -int cec_add_elem(u64 pfn); -#else -static inline void __init cec_init(void) { } -static inline int 
cec_add_elem(u64 pfn) { return -ENODEV; } #endif #ifdef CONFIG_RAS diff --git a/include/linux/regset.h b/include/linux/regset.h index bf0243779738..46d6ae68c455 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -320,7 +320,7 @@ static inline int user_regset_copyout_zero(unsigned int *pos, if (*kbuf) { memset(*kbuf, 0, copy); *kbuf += copy; - } else if (__clear_user(*ubuf, copy)) + } else if (clear_user(*ubuf, copy)) return -EFAULT; else *ubuf += copy; diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 988d176472df..3a6adfa70fb0 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -77,7 +77,7 @@ struct anon_vma { struct anon_vma_chain { struct vm_area_struct *vma; struct anon_vma *anon_vma; - struct list_head same_vma; /* locked by mmap_sem & page_table_lock */ + struct list_head same_vma; /* locked by mmap_lock & page_table_lock */ struct rb_node rb; /* locked by anon_vma->rwsem */ unsigned long rb_subtree_last; #ifdef CONFIG_DEBUG_VM_RB diff --git a/include/linux/sched.h b/include/linux/sched.h index c5d96e3e7fff..b62e6aaf28f0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -31,6 +31,7 @@ #include <linux/task_io_accounting.h> #include <linux/posix-timers.h> #include <linux/rseq.h> +#include <linux/kcsan.h> /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; @@ -1197,6 +1198,9 @@ struct task_struct { #ifdef CONFIG_KASAN unsigned int kasan_depth; #endif +#ifdef CONFIG_KCSAN + struct kcsan_ctx kcsan_ctx; +#endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored address in ret_stack: */ @@ -1304,7 +1308,9 @@ struct task_struct { #ifdef CONFIG_X86_MCE u64 mce_addr; - u64 mce_status; + __u64 mce_ripv : 1, + mce_whole_page : 1, + __mce_reserved : 62; struct callback_head mce_kill_me; #endif diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h index 95fb9e025247..00c45a0e6abe 100644 --- a/include/linux/sched/debug.h +++ b/include/linux/sched/debug.h @@ -30,7 +30,8 @@ extern void show_regs(struct pt_regs *); * task), SP is the stack pointer of the first frame that should be shown in the back * trace (or NULL if the entire call-chain of the task should be shown). */ -extern void show_stack(struct task_struct *task, unsigned long *sp); +extern void show_stack(struct task_struct *task, unsigned long *sp, + const char *loglvl); extern void sched_show_task(struct task_struct *p); diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index a132d875d351..480a4d1b7dd8 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -53,7 +53,7 @@ void mmdrop(struct mm_struct *mm); /* * This has to be called after a get_task_mm()/mmget_not_zero() - * followed by taking the mmap_sem for writing before modifying the + * followed by taking the mmap_lock for writing before modifying the * vmas or anything the coredump pretends not to change from under it. * * It also has to be called when mmgrab() is used in the context of @@ -61,14 +61,14 @@ void mmdrop(struct mm_struct *mm); * the context of the process to run down_write() on that pinned mm. * * NOTE: find_extend_vma() called from GUP context is the only place - * that can modify the "mm" (notably the vm_start/end) under mmap_sem + * that can modify the "mm" (notably the vm_start/end) under mmap_lock * for reading and outside the context of the process, so it is also - * the only case that holds the mmap_sem for reading that must call - * this function. 
Generally if the mmap_sem is hold for reading + * the only case that holds the mmap_lock for reading that must call + * this function. Generally if the mmap_lock is held for reading * there's no need of this check after get_task_mm()/mmget_not_zero(). * * This function can be obsoleted and the check can be removed, after - * the coredump code will hold the mmap_sem for writing before + * the coredump code will hold the mmap_lock for writing before * invoking the ->core_dump methods. */ static inline bool mmget_still_valid(struct mm_struct *mm) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 8ccd82105de8..76731230bbc5 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -221,7 +221,7 @@ struct sctp_datahdr { __be16 stream; __be16 ssn; __u32 ppid; - __u8 payload[0]; + __u8 payload[]; }; struct sctp_data_chunk { @@ -269,7 +269,7 @@ struct sctp_inithdr { __be16 num_outbound_streams; __be16 num_inbound_streams; __be32 initial_tsn; - __u8 params[0]; + __u8 params[]; }; struct sctp_init_chunk { @@ -299,13 +299,13 @@ struct sctp_cookie_preserve_param { /* Section 3.3.2.1 Host Name Address (11) */ struct sctp_hostname_param { struct sctp_paramhdr param_hdr; - uint8_t hostname[0]; + uint8_t hostname[]; }; /* Section 3.3.2.1 Supported Address Types (12) */ struct sctp_supported_addrs_param { struct sctp_paramhdr param_hdr; - __be16 types[0]; + __be16 types[]; }; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ @@ -317,25 +317,25 @@ struct sctp_adaptation_ind_param { /* ADDIP Section 4.2.7 Supported Extensions Parameter */ struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; - __u8 chunks[0]; + __u8 chunks[]; }; /* AUTH Section 3.1 Random */ struct sctp_random_param { struct sctp_paramhdr param_hdr; - __u8 random_val[0]; + __u8 random_val[]; }; /* AUTH Section 3.2 Chunk List */ struct sctp_chunks_param { struct sctp_paramhdr param_hdr; - __u8 chunks[0]; + __u8 chunks[]; }; /* AUTH Section 3.3 HMAC Algorithm */ struct sctp_hmac_algo_param { struct sctp_paramhdr param_hdr; - __be16 hmac_ids[0]; + __be16 hmac_ids[]; }; /* RFC 2960.
Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): @@ -350,7 +350,7 @@ struct sctp_initack_chunk { /* Section 3.3.3.1 State Cookie (7) */ struct sctp_cookie_param { struct sctp_paramhdr p; - __u8 body[0]; + __u8 body[]; }; /* Section 3.3.3.1 Unrecognized Parameters (8) */ @@ -384,7 +384,7 @@ struct sctp_sackhdr { __be32 a_rwnd; __be16 num_gap_ack_blocks; __be16 num_dup_tsns; - union sctp_sack_variable variable[0]; + union sctp_sack_variable variable[]; }; struct sctp_sack_chunk { @@ -436,7 +436,7 @@ struct sctp_shutdown_chunk { struct sctp_errhdr { __be16 cause; __be16 length; - __u8 variable[0]; + __u8 variable[]; }; struct sctp_operr_chunk { @@ -594,7 +594,7 @@ struct sctp_fwdtsn_skip { struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; - struct sctp_fwdtsn_skip skip[0]; + struct sctp_fwdtsn_skip skip[]; }; struct sctp_fwdtsn_chunk { @@ -611,7 +611,7 @@ struct sctp_ifwdtsn_skip { struct sctp_ifwdtsn_hdr { __be32 new_cum_tsn; - struct sctp_ifwdtsn_skip skip[0]; + struct sctp_ifwdtsn_skip skip[]; }; struct sctp_ifwdtsn_chunk { @@ -658,7 +658,7 @@ struct sctp_addip_param { struct sctp_addiphdr { __be32 serial; - __u8 params[0]; + __u8 params[]; }; struct sctp_addip_chunk { @@ -718,7 +718,7 @@ struct sctp_addip_chunk { struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; - __u8 hmac[0]; + __u8 hmac[]; }; struct sctp_auth_chunk { @@ -733,7 +733,7 @@ struct sctp_infox { struct sctp_reconf_chunk { struct sctp_chunkhdr chunk_hdr; - __u8 params[0]; + __u8 params[]; }; struct sctp_strreset_outreq { @@ -741,13 +741,13 @@ struct sctp_strreset_outreq { __be32 request_seq; __be32 response_seq; __be32 send_reset_at_tsn; - __be16 list_of_streams[0]; + __be16 list_of_streams[]; }; struct sctp_strreset_inreq { struct sctp_paramhdr param_hdr; __be32 request_seq; - __be16 list_of_streams[0]; + __be16 list_of_streams[]; }; struct sctp_strreset_tsnreq { diff --git a/include/linux/security.h b/include/linux/security.h index b3f2cb21b4f2..0a0a03b36a3b 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -56,6 +56,8 @@ struct mm_struct; struct fs_context; struct fs_parameter; enum fs_value_type; +struct watch; +struct watch_notification; /* Default (no) options for the capable function */ #define CAP_OPT_NONE 0x0 @@ -390,6 +392,8 @@ int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, enum kernel_read_file_id id); int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags); +int security_task_fix_setgid(struct cred *new, const struct cred *old, + int flags); int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); int security_task_getsid(struct task_struct *p); @@ -1034,6 +1038,13 @@ static inline int security_task_fix_setuid(struct cred *new, return cap_task_fix_setuid(new, old, flags); } +static inline int security_task_fix_setgid(struct cred *new, + const struct cred *old, + int flags) +{ + return 0; +} + static inline int security_task_setpgid(struct task_struct *p, pid_t pgid) { return 0; @@ -1282,6 +1293,28 @@ static inline int security_locked_down(enum lockdown_reason what) } #endif /* CONFIG_SECURITY */ +#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) +int security_post_notification(const struct cred *w_cred, + const struct cred *cred, + struct watch_notification *n); +#else +static inline int security_post_notification(const struct cred *w_cred, + const struct cred *cred, + struct watch_notification *n) +{ + return 0; +} +#endif + +#if defined(CONFIG_SECURITY) && 
defined(CONFIG_KEY_NOTIFICATIONS) +int security_watch_key(struct key *key); +#else +static inline int security_watch_key(struct key *key) +{ + return 0; +} +#endif + #ifdef CONFIG_SECURITY_NETWORK int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk); @@ -1750,8 +1783,8 @@ static inline int security_path_chroot(const struct path *path) int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); void security_key_free(struct key *key); -int security_key_permission(key_ref_t key_ref, - const struct cred *cred, unsigned perm); +int security_key_permission(key_ref_t key_ref, const struct cred *cred, + enum key_need_perm need_perm); int security_key_getsecurity(struct key *key, char **_buffer); #else @@ -1769,7 +1802,7 @@ static inline void security_key_free(struct key *key) static inline int security_key_permission(key_ref_t key_ref, const struct cred *cred, - unsigned perm) + enum key_need_perm need_perm) { return 0; } diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 0491d963d47e..8b97204f35a7 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -37,9 +37,25 @@ #include <linux/preempt.h> #include <linux/lockdep.h> #include <linux/compiler.h> +#include <linux/kcsan-checks.h> #include <asm/processor.h> /* + * The seqlock interface does not prescribe a precise sequence of read + * begin/retry/end. For readers, typically there is a call to + * read_seqcount_begin() and read_seqcount_retry(), however, there are more + * esoteric cases which do not follow this pattern. + * + * As a consequence, we take the following best-effort approach for raw usage + * via seqcount_t under KCSAN: upon beginning a seq-reader critical section, + * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as + * atomics; if there is a matching read_seqcount_retry() call, no following + * memory operations are considered atomic. Usage of seqlocks via seqlock_t + * interface is not affected. + */ +#define KCSAN_SEQLOCK_REGION_MAX 1000 + +/* * Version using sequence counter only. * This can be used when code has its own mutex protecting the * updating starting before the write_seqcount_begin() and ending @@ -115,6 +131,7 @@ repeat: cpu_relax(); goto repeat; } + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); return ret; } @@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s) { unsigned ret = READ_ONCE(s->sequence); smp_rmb(); + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); return ret; } @@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) { unsigned ret = READ_ONCE(s->sequence); smp_rmb(); + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); return ret & ~1; } @@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) */ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) { - return unlikely(s->sequence != start); + kcsan_atomic_next(0); + return unlikely(READ_ONCE(s->sequence) != start); } /** @@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) static inline void raw_write_seqcount_begin(seqcount_t *s) { + kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); } @@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s) { smp_wmb(); s->sequence++; + kcsan_nestable_atomic_end(); } /** @@ -243,6 +265,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * usual consistency guarantee.
It is one wmb cheaper, because we can * collapse the two back-to-back wmb()s. * + * Note that writes surrounding the barrier should be declared atomic (e.g. + * via WRITE_ONCE): a) to ensure the writes become visible to other threads + * atomically, avoiding compiler optimizations; b) to document which writes are + * meant to propagate to the reader critical section. This is necessary because + * neither the writes before nor those after the barrier are enclosed in a seq-writer + * critical section that would ensure readers are aware of ongoing writes. + * * seqcount_t seq; * bool X = true, Y = false; * @@ -262,18 +291,20 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * * void write(void) * { - * Y = true; + * WRITE_ONCE(Y, true); * * raw_write_seqcount_barrier(seq); * - * X = false; + * WRITE_ONCE(X, false); * } */ static inline void raw_write_seqcount_barrier(seqcount_t *s) { + kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); s->sequence++; + kcsan_nestable_atomic_end(); } static inline int raw_read_seqcount_latch(seqcount_t *s) @@ -398,7 +429,9 @@ static inline void write_seqcount_end(seqcount_t *s) static inline void write_seqcount_invalidate(seqcount_t *s) { smp_wmb(); + kcsan_nestable_atomic_begin(); s->sequence+=2; + kcsan_nestable_atomic_end(); } typedef struct { @@ -430,11 +463,21 @@ typedef struct { */ static inline unsigned read_seqbegin(const seqlock_t *sl) { - return read_seqcount_begin(&sl->seqcount); + unsigned ret = read_seqcount_begin(&sl->seqcount); + + kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */ + kcsan_flat_atomic_begin(); + return ret; } static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { + /* + * Assume not nested: read_seqretry() may be called multiple times when + * completing a read critical section.
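+ * + * The usual reader pattern this pairs with, as a sketch (sl is some + * seqlock_t protecting the data): + * + *	unsigned seq; + *	do { + *		seq = read_seqbegin(&sl); + *		... read the protected data ... + *	} while (read_seqretry(&sl, seq));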
+ */ + kcsan_flat_atomic_end(); + return read_seqcount_retry(&sl->seqcount, start); } diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 86281ac7c305..860e0f843c12 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -26,7 +26,7 @@ static inline int set_direct_map_default_noflush(struct page *page) #endif #ifndef set_mce_nospec -static inline int set_mce_nospec(unsigned long pfn) +static inline int set_mce_nospec(unsigned long pfn, bool unmap) { return 0; } diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 83bd8cb475d7..b7af8cc13eda 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -64,7 +64,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; - int skip; /* input argument: How many entries to skip */ + unsigned int skip; /* input argument: How many entries to skip */ }; extern void save_stack_trace(struct stack_trace *trace); diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 4f6b28487f28..98da816b5fc2 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -76,7 +76,7 @@ struct rpc_auth { unsigned int au_verfsize; /* size of reply verifier */ unsigned int au_ralign; /* words before UL header */ - unsigned int au_flags; + unsigned long au_flags; const struct rpc_authops *au_ops; rpc_authflavor_t au_flavor; /* pseudoflavor (note may * differ from the flavor in @@ -89,7 +89,8 @@ struct rpc_auth { }; /* rpc_auth au_flags */ -#define RPCAUTH_AUTH_DATATOUCH 0x00000002 +#define RPCAUTH_AUTH_DATATOUCH (1) +#define RPCAUTH_AUTH_UPDATE_SLACK (2) struct rpc_auth_create_args { rpc_authflavor_t pseudoflavor; diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index bc07e51f20d1..bf4ac8a0268c 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -84,6 +84,7 @@ struct pf_desc { u32 service; char *name; char *auth_domain_name; + struct auth_domain *domain; bool datatouch; }; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index fd390894a584..386628b36bc7 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -254,6 +254,7 @@ struct svc_rqst { struct page * *rq_page_end; /* one past the last page */ struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */ + struct bio_vec rq_bvec[RPCSVC_MAXPAGES]; __be32 rq_xid; /* transmission id */ u32 rq_prog; /* program number */ @@ -299,6 +300,7 @@ struct svc_rqst { struct net *rq_bc_net; /* pointer to backchannel's * net namespace */ + void ** rq_lease_breaker; /* The v4 client breaking a lease */ }; #define SVC_NET(rqst) (rqst->rq_xprt ? 
rqst->rq_xprt->xpt_net : rqst->rq_bc_net) diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index cbcfbd0521e3..7ed82625dc0b 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -48,7 +48,6 @@ #include <linux/sunrpc/rpc_rdma.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> -#define SVCRDMA_DEBUG /* Default and maximum inline threshold sizes */ enum { @@ -160,9 +159,8 @@ struct svc_rdma_send_ctxt { }; /* svc_rdma_backchannel.c */ -extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, - __be32 *rdma_resp, - struct xdr_buf *rcvbuf); +extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, + struct svc_rdma_recv_ctxt *rctxt); /* svc_rdma_recvfrom.c */ extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 9e1e046de176..aca35ab5cff2 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -117,6 +117,12 @@ static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u return 0; } +static inline bool svc_xprt_is_dead(const struct svc_xprt *xprt) +{ + return (test_bit(XPT_DEAD, &xprt->xpt_flags) != 0) || + (test_bit(XPT_CLOSE, &xprt->xpt_flags) != 0); +} + int svc_reg_xprt_class(struct svc_xprt_class *); void svc_unreg_xprt_class(struct svc_xprt_class *); void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index ca39a388dc22..f09c82b0a7ae 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -20,7 +20,8 @@ int gss_svc_init(void); void gss_svc_shutdown(void); int gss_svc_init_net(struct net *net); void gss_svc_shutdown_net(struct net *net); -int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); +struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, + char *name); u32 svcauth_gss_flavor(struct auth_domain *dom); #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 771baadaee9d..b7ac7fe68306 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -28,7 +28,7 @@ struct svc_sock { /* private TCP part */ /* On-the-wire fragment header: */ - __be32 sk_reclen; + __be32 sk_marker; /* As we receive a record, this includes the length received so * far (including the fragment header): */ u32 sk_tcplen; @@ -41,12 +41,12 @@ struct svc_sock { static inline u32 svc_sock_reclen(struct svc_sock *svsk) { - return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK; + return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK; } static inline u32 svc_sock_final_rec(struct svc_sock *svsk) { - return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT; + return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT; } /* diff --git a/include/linux/thermal.h b/include/linux/thermal.h index c91b1e344d56..216185bb3014 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -32,20 +32,10 @@ /* use value, which < 0K, to indicate an invalid/uninitialized temperature */ #define THERMAL_TEMP_INVALID -274000 -/* Default Thermal Governor */ -#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) -#define DEFAULT_THERMAL_GOVERNOR "step_wise" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE) -#define DEFAULT_THERMAL_GOVERNOR "fair_share" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) -#define DEFAULT_THERMAL_GOVERNOR 
"user_space" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR) -#define DEFAULT_THERMAL_GOVERNOR "power_allocator" -#endif - struct thermal_zone_device; struct thermal_cooling_device; struct thermal_instance; +struct thermal_attr; enum thermal_device_mode { THERMAL_DEVICE_DISABLED = 0, @@ -130,11 +120,6 @@ struct thermal_cooling_device { struct list_head node; }; -struct thermal_attr { - struct device_attribute attr; - char name[THERMAL_NAME_LENGTH]; -}; - /** * struct thermal_zone_device - structure for a thermal zone * @id: unique id number for each thermal zone @@ -347,21 +332,6 @@ struct thermal_zone_of_device_ops { int (*set_trip_temp)(void *, int, int); }; -/** - * struct thermal_trip - representation of a point in temperature domain - * @np: pointer to struct device_node that this trip point was created from - * @temperature: temperature value in miliCelsius - * @hysteresis: relative hysteresis in miliCelsius - * @type: trip point type - */ - -struct thermal_trip { - struct device_node *np; - int temperature; - int hysteresis; - enum thermal_trip_type type; -}; - /* Function declarations */ #ifdef CONFIG_THERMAL_OF int thermal_zone_of_get_sensor_id(struct device_node *tz_np, @@ -413,19 +383,7 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev, #endif -#if IS_ENABLED(CONFIG_THERMAL) -static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) -{ - return cdev->ops->get_requested_power && cdev->ops->state2power && - cdev->ops->power2state; -} - -int power_actor_get_max_power(struct thermal_cooling_device *, - struct thermal_zone_device *tz, u32 *max_power); -int power_actor_get_min_power(struct thermal_cooling_device *, - struct thermal_zone_device *tz, u32 *min_power); -int power_actor_set_power(struct thermal_cooling_device *, - struct thermal_instance *, u32); +#ifdef CONFIG_THERMAL struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, void *, struct thermal_zone_device_ops *, struct thermal_zone_params *, int, int); @@ -439,7 +397,6 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *, enum thermal_notify_event); -void thermal_zone_set_trips(struct thermal_zone_device *); struct thermal_cooling_device *thermal_cooling_device_register(const char *, void *, const struct thermal_cooling_device_ops *); @@ -457,24 +414,9 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); -int get_tz_trend(struct thermal_zone_device *, int); -struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, - struct thermal_cooling_device *, int); void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); #else -static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) -{ return false; } -static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, - struct thermal_zone_device *tz, u32 *max_power) -{ return 0; } -static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev, - struct thermal_zone_device *tz, - u32 *min_power) -{ return -ENODEV; } -static inline int power_actor_set_power(struct thermal_cooling_device *cdev, - struct thermal_instance *tz, u32 power) -{ return 0; } static inline struct thermal_zone_device *thermal_zone_device_register( 
const char *type, int trips, int mask, void *devdata, struct thermal_zone_device_ops *ops, @@ -484,21 +426,6 @@ static inline struct thermal_zone_device *thermal_zone_device_register( static inline void thermal_zone_device_unregister( struct thermal_zone_device *tz) { } -static inline int thermal_zone_bind_cooling_device( - struct thermal_zone_device *tz, int trip, - struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower, - unsigned int weight) -{ return -ENODEV; } -static inline int thermal_zone_unbind_cooling_device( - struct thermal_zone_device *tz, int trip, - struct thermal_cooling_device *cdev) -{ return -ENODEV; } -static inline void thermal_zone_device_update(struct thermal_zone_device *tz, - enum thermal_notify_event event) -{ } -static inline void thermal_zone_set_trips(struct thermal_zone_device *tz) -{ } static inline struct thermal_cooling_device * thermal_cooling_device_register(char *type, void *devdata, const struct thermal_cooling_device_ops *ops) @@ -530,12 +457,7 @@ static inline int thermal_zone_get_slope( static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } -static inline int get_tz_trend(struct thermal_zone_device *tz, int trip) -{ return -ENODEV; } -static inline struct thermal_instance * -get_thermal_instance(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, int trip) -{ return ERR_PTR(-ENODEV); } + static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) { } static inline void thermal_notify_framework(struct thermal_zone_device *tz, diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 299cbb8c63bb..44073d06710f 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -124,7 +124,7 @@ struct tifm_adapter { int (*has_ms_pif)(struct tifm_adapter *fm, struct tifm_dev *sock); - struct tifm_dev *sockets[0]; + struct tifm_dev *sockets[]; }; struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 9861c89f93be..7bcadca22100 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -2,9 +2,9 @@ #ifndef __LINUX_UACCESS_H__ #define __LINUX_UACCESS_H__ +#include <linux/instrumented.h> #include <linux/sched.h> #include <linux/thread_info.h> -#include <linux/kasan-checks.h> #define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) @@ -58,7 +58,7 @@ static __always_inline __must_check unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); check_object_size(to, n, false); return raw_copy_from_user(to, from, n); } @@ -67,7 +67,7 @@ static __always_inline __must_check unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_fault(); - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); check_object_size(to, n, false); return raw_copy_from_user(to, from, n); } @@ -88,7 +88,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) static __always_inline __must_check unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } @@ -97,7 +97,7 @@ static __always_inline __must_check unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); - kasan_check_read(from, n); + instrument_copy_to_user(to, 
from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } @@ -109,7 +109,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n) unsigned long res = n; might_fault(); if (likely(access_ok(from, n))) { - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); res = raw_copy_from_user(to, from, n); } if (unlikely(res)) @@ -127,7 +127,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); if (access_ok(to, n)) { - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); n = raw_copy_to_user(to, from, n); } return n; @@ -301,62 +301,20 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src, return 0; } -/* - * probe_kernel_read(): safely attempt to read from a location - * @dst: pointer to the buffer that shall take the data - * @src: address to read from - * @size: size of the data chunk - * - * Safely read from address @src to the buffer at @dst. If a kernel fault - * happens, handle that and return -EFAULT. - */ -extern long probe_kernel_read(void *dst, const void *src, size_t size); -extern long probe_kernel_read_strict(void *dst, const void *src, size_t size); -extern long __probe_kernel_read(void *dst, const void *src, size_t size); +bool probe_kernel_read_allowed(const void *unsafe_src, size_t size); -/* - * probe_user_read(): safely attempt to read from a location in user space - * @dst: pointer to the buffer that shall take the data - * @src: address to read from - * @size: size of the data chunk - * - * Safely read from address @src to the buffer at @dst. If a kernel fault - * happens, handle that and return -EFAULT. - */ +extern long probe_kernel_read(void *dst, const void *src, size_t size); extern long probe_user_read(void *dst, const void __user *src, size_t size); -extern long __probe_user_read(void *dst, const void __user *src, size_t size); -/* - * probe_kernel_write(): safely attempt to write to a location - * @dst: address to write to - * @src: pointer to the data that shall be written - * @size: size of the data chunk - * - * Safely write to address @dst from the buffer at @src. If a kernel fault - * happens, handle that and return -EFAULT. - */ extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); -extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); - -/* - * probe_user_write(): safely attempt to write to a location in user space - * @dst: address to write to - * @src: pointer to the data that shall be written - * @size: size of the data chunk - * - * Safely write to address @dst from the buffer at @src. If a kernel fault - * happens, handle that and return -EFAULT. 
- */ extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); -extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); - -extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); -extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, - long count); -extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); -extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, - long count); -extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); + +long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, + long count); + +long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, + long count); +long strnlen_user_nofault(const void __user *unsafe_addr, long count); /** * probe_kernel_address(): safely attempt to read from a location diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 5453af87a33e..239db794357c 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -18,6 +18,16 @@ struct vdpa_callback { }; /** + * vDPA notification area + * @addr: base address of the notification area + * @size: size of the notification area + */ +struct vdpa_notification_area { + resource_size_t addr; + resource_size_t size; +}; + +/** * vDPA device - representation of a vDPA device * @dev: underlying device * @dma_dev: the actual device that is performing DMA @@ -73,6 +83,10 @@ struct vdpa_device { * @vdev: vdpa device * @idx: virtqueue index * Returns virtqueue state (last_avail_idx) + * @get_vq_notification: Get the notification area for a virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * Returns the notification area + * @get_vq_align: Get the virtqueue align requirement * for the device * @vdev: vdpa device @@ -162,6 +176,8 @@ struct vdpa_config_ops { bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state); u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); + struct vdpa_notification_area + (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); /* Device ops */ u32 (*get_vq_align)(struct vdpa_device *vdev); diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 9e2763d7c159..59bd50f99291 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -105,9 +105,9 @@ struct vringh_kiov { /* Helpers for userspace vrings. */ int vringh_init_user(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, - struct vring_desc __user *desc, - struct vring_avail __user *avail, - struct vring_used __user *used); + vring_desc_t __user *desc, + vring_avail_t __user *avail, + vring_used_t __user *used); static inline void vringh_iov_init(struct vringh_iov *iov, struct iovec *iovec, unsigned num) diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h new file mode 100644 index 000000000000..5e08db2adc31 --- /dev/null +++ b/include/linux/watch_queue.h @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* User-mappable watch queue + * + * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com) + * + * See Documentation/watch_queue.rst + */ + +#ifndef _LINUX_WATCH_QUEUE_H +#define _LINUX_WATCH_QUEUE_H + +#include <uapi/linux/watch_queue.h> +#include <linux/kref.h> +#include <linux/rcupdate.h> + +#ifdef CONFIG_WATCH_QUEUE + +struct cred; + +struct watch_type_filter { + enum watch_notification_type type; + __u32 subtype_filter[1]; /* Bitmask of subtypes to filter on */ + __u32 info_filter; /* Filter on watch_notification::info */ + __u32 info_mask; /* Mask of relevant bits in info_filter */ +}; + +struct watch_filter { + union { + struct rcu_head rcu; + unsigned long type_filter[2]; /* Bitmask of accepted types */ + }; + u32 nr_filters; /* Number of filters */ + struct watch_type_filter filters[]; +}; + +struct watch_queue { + struct rcu_head rcu; + struct watch_filter __rcu *filter; + struct pipe_inode_info *pipe; /* The pipe we're using as a buffer */ + struct hlist_head watches; /* Contributory watches */ + struct page **notes; /* Preallocated notifications */ + unsigned long *notes_bitmap; /* Allocation bitmap for notes */ + struct kref usage; /* Object usage count */ + spinlock_t lock; + unsigned int nr_notes; /* Number of notes */ + unsigned int nr_pages; /* Number of pages in notes[] */ + bool defunct; /* T when queues closed */ +}; + +/* + * Representation of a watch on an object. + */ +struct watch { + union { + struct rcu_head rcu; + u32 info_id; /* ID to be OR'd in to info field */ + }; + struct watch_queue __rcu *queue; /* Queue to post events to */ + struct hlist_node queue_node; /* Link in queue->watches */ + struct watch_list __rcu *watch_list; + struct hlist_node list_node; /* Link in watch_list->watchers */ + const struct cred *cred; /* Creds of the owner of the watch */ + void *private; /* Private data for the watched object */ + u64 id; /* Internal identifier */ + struct kref usage; /* Object usage count */ +}; + +/* + * List of watches on an object. 
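+ * + * A watched object typically embeds one of these; a minimal sketch (the + * object type and release handler are hypothetical): + * + *	struct myobj { + *		struct watch_list watchers; + *	}; + * + *	init_watch_list(&obj->watchers, myobj_release_watch);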
+ */ +struct watch_list { + struct rcu_head rcu; + struct hlist_head watchers; + void (*release_watch)(struct watch *); + spinlock_t lock; +}; + +extern void __post_watch_notification(struct watch_list *, + struct watch_notification *, + const struct cred *, + u64); +extern struct watch_queue *get_watch_queue(int); +extern void put_watch_queue(struct watch_queue *); +extern void init_watch(struct watch *, struct watch_queue *); +extern int add_watch_to_object(struct watch *, struct watch_list *); +extern int remove_watch_from_object(struct watch_list *, struct watch_queue *, u64, bool); +extern long watch_queue_set_size(struct pipe_inode_info *, unsigned int); +extern long watch_queue_set_filter(struct pipe_inode_info *, + struct watch_notification_filter __user *); +extern int watch_queue_init(struct pipe_inode_info *); +extern void watch_queue_clear(struct watch_queue *); + +static inline void init_watch_list(struct watch_list *wlist, + void (*release_watch)(struct watch *)) +{ + INIT_HLIST_HEAD(&wlist->watchers); + spin_lock_init(&wlist->lock); + wlist->release_watch = release_watch; +} + +static inline void post_watch_notification(struct watch_list *wlist, + struct watch_notification *n, + const struct cred *cred, + u64 id) +{ + if (unlikely(wlist)) + __post_watch_notification(wlist, n, cred, id); +} + +static inline void remove_watch_list(struct watch_list *wlist, u64 id) +{ + if (wlist) { + remove_watch_from_object(wlist, NULL, id, true); + kfree_rcu(wlist, rcu); + } +} + +/** + * watch_sizeof - Calculate the information part of the size of a watch record, + * given the structure size. + */ +#define watch_sizeof(STRUCT) (sizeof(STRUCT) << WATCH_INFO_LENGTH__SHIFT) + +#endif + +#endif /* _LINUX_WATCH_QUEUE_H */
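To illustrate the notification API introduced above: a hedged sketch of a subsystem posting an event. The struct myobj type, its watchers/id fields, and the choice of WATCH_TYPE_META with subtype 0 are illustrative assumptions, not part of the header.

	#include <linux/watch_queue.h>
	#include <linux/cred.h>

	/* Hypothetical watched object (sketch only). */
	struct myobj {
		struct watch_list watchers;
		u64 id;
	};

	static void myobj_notify_change(struct myobj *obj)
	{
		struct watch_notification n = {};

		n.type = WATCH_TYPE_META;	/* illustrative; a real subsystem
						 * would use its own type */
		n.subtype = 0;
		n.info = watch_sizeof(struct watch_notification);

		post_watch_notification(&obj->watchers, &n, current_cred(),
					obj->id);
	}

post_watch_notification() is a cheap no-op when the watch_list pointer is NULL, so callers can invoke it unconditionally on the hot path.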