author | Hannes Frederic Sowa <hannes@stressinduktion.org> | 2016-04-05 17:10:15 +0200 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-04-07 16:44:14 -0400 |
commit | 1e1d04e678cf72442f57ce82803c7a407769135f (patch) |
tree | 8ea1054b59c4abe211ffff0b81f34589b0089f22 /include | |
parent | 61881cfb5ad80c1d0a46ca6d08b7e271892b2ff6 (diff) |
net: introduce lockdep_is_held and update various places to use it
The socket is locked either when we hold the slock spinlock (as during the
lock_sock_fast/unlock_sock_fast fast path) or when we own the lock
(sk_lock.owned != 0). Check for both conditions and, via lockdep, also verify
that the current thread/CPU really holds the lock; both paths are sketched
below.
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
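For context, a minimal sketch (not part of the patch) of the two locking paths the message describes. The function example_touch_dst() is made up; lock_sock()/release_sock() and lock_sock_fast()/unlock_sock_fast() are the existing socket-locking APIs:

```c
#include <net/sock.h>

/* Hypothetical caller, for illustration only: a socket is "locked" in
 * one of two ways, and the new annotation has to accept both.
 */
static void example_touch_dst(struct sock *sk)
{
	bool slow;

	/* Slow path: lock_sock() sets sk->sk_lock.owned and drops the
	 * slock spinlock before returning.
	 */
	lock_sock(sk);
	/* __sk_dst_get(sk) may be called here */
	release_sock(sk);

	/* Fast path: lock_sock_fast() may simply keep sk_lock.slock
	 * held instead of marking the socket owned, which is why the
	 * annotation also accepts a held slock.
	 */
	slow = lock_sock_fast(sk);
	/* __sk_dst_get(sk) may be called here as well */
	unlock_sock_fast(sk, slow);
}
```

Checking both states through lockdep is also what tightens the old annotation: sock_owned_by_user() only tests owned != 0, while lockdep_is_held() insists that the current context actually acquired the lock.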
Diffstat (limited to 'include')
-rw-r--r-- | include/net/sock.h | 12 |
1 file changed, 10 insertions, 2 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 91cee51086dc..eb2d7c3e120b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1360,6 +1360,14 @@ do { \
 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
 } while (0)
 
+static bool lockdep_sock_is_held(const struct sock *csk)
+{
+	struct sock *sk = (struct sock *)csk;
+
+	return lockdep_is_held(&sk->sk_lock) ||
+	       lockdep_is_held(&sk->sk_lock.slock);
+}
+
 void lock_sock_nested(struct sock *sk, int subclass);
 
 static inline void lock_sock(struct sock *sk)
@@ -1598,8 +1606,8 @@ static inline void sk_rethink_txhash(struct sock *sk)
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
 {
-	return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
-				     lockdep_is_held(&sk->sk_lock.slock));
+	return rcu_dereference_check(sk->sk_dst_cache,
+				     lockdep_sock_is_held(sk));
 }
 
 static inline struct dst_entry *
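With the helper available, other RCU-managed, socket-lock-protected pointers can carry the same annotation as __sk_dst_get(); the "various places" in the subject are updated by the parts of the commit filtered out by the include/-only diffstat. A hypothetical sketch, where struct example_state and example_state_get() are made-up names:

```c
#include <net/sock.h>

/* Hypothetical per-socket state published via RCU and updated only
 * under the socket lock.
 */
struct example_state {
	int value;
};

static inline struct example_state *
example_state_get(struct sock *sk, struct example_state __rcu **slot)
{
	/* With CONFIG_PROVE_RCU, this warns unless the caller is in an
	 * RCU read-side critical section or really holds the socket
	 * lock on this CPU/thread.
	 */
	return rcu_dereference_check(*slot, lockdep_sock_is_held(sk));
}
```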