author    David S. Miller <davem@davemloft.net>  2018-01-05 13:39:19 -0500
committer David S. Miller <davem@davemloft.net>  2018-01-05 13:39:19 -0500
commit    ad521763e6060b3cb11df572cab3339526cd82c0 (patch)
tree      67f61aa4784da3fba56aa43e3626d3f3b6562c78 /net/rds/tcp_recv.c
parent    eb9aa1bfbad8c9fc280adf43cb480911295cfa3f (diff)
parent    3db6e0d172c94bd9953a1347c55ffb64b1d2e74f (diff)
Merge branch 'rds-use-RCU-between-work-enqueue-and-connection-teardown'
Sowmini Varadhan says:

====================
rds: use RCU between work-enqueue and connection teardown

This patchset follows up on the root cause mentioned in
https://www.spinics.net/lists/netdev/msg472849.html

Patch 1 implements some code refactoring that was suggested as an
enhancement in http://patchwork.ozlabs.org/patch/843157/. It replaces
the c_destroy_in_prog bit in rds_connection with an atomically managed
flag in rds_conn_path.

Patch 2 builds on Patch 1 and uses RCU to make sure that work is only
enqueued if connection destroy is not already in progress: the
test-flag-and-enqueue is done under rcu_read_lock, while destroy first
sets the flag, uses synchronize_rcu to wait for existing reader threads
to complete, and then starts all the work-cancellation.

Since I have not been able to reproduce the original stack traces
reported by syzbot, and these are fixes for a race condition based on
code inspection, I am not marking these as Reported-by at this time.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
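The destroy-side ordering described above (set the flag, wait for readers
with synchronize_rcu(), then cancel outstanding work) is the counterpart
to the reader-side guard in the diff below. A minimal sketch of that
ordering, assuming a simplified teardown helper; the function name is
hypothetical and only the three-step ordering is taken from the patchset
description:

	/* Sketch of the destroy-side ordering. The function name is
	 * hypothetical, not the exact upstream teardown path. */
	static void rds_conn_path_teardown_sketch(struct rds_conn_path *cp)
	{
		/* 1. Mark the path so readers inside rcu_read_lock()
		 *    sections see the flag and skip work-enqueue. */
		set_bit(RDS_DESTROY_PENDING, &cp->cp_flags);

		/* 2. Wait out any reader that sampled the flag as clear
		 *    and may still be about to queue_delayed_work(). */
		synchronize_rcu();

		/* 3. Now cancellation cannot race with a late enqueue. */
		cancel_delayed_work_sync(&cp->cp_recv_w);
	}

With this ordering, any rds_tcp_data_ready() caller either sees the flag
set and skips the enqueue, or finishes its rcu_read_lock() section (and
hence its enqueue) before synchronize_rcu() returns, so the subsequent
cancellation observes all queued work.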
Diffstat (limited to 'net/rds/tcp_recv.c')
-rw-r--r--  net/rds/tcp_recv.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index e006ef8e6d40..dd707b9e73e5 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -321,8 +321,12 @@ void rds_tcp_data_ready(struct sock *sk)
 	ready = tc->t_orig_data_ready;
 	rds_tcp_stats_inc(s_tcp_data_ready_calls);
 
-	if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM)
-		queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+	if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
+		rcu_read_lock();
+		if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+			queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+		rcu_read_unlock();
+	}
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
 	ready(sk);