author    Jens Axboe <axboe@kernel.dk>    2020-09-22 08:18:24 -0600
committer Jens Axboe <axboe@kernel.dk>    2020-09-30 20:32:32 -0600
commit    f3606e3a92ddd36299642c78592fc87609abb1f6 (patch)
tree      8be13f016d0adf2742fae2f1c8dac610b78f51e3
parent    0f078896911fc6a421b0e708e910b99a28f8a0fa (diff)
io_uring: allow timeout/poll/files killing to take task into account
We currently cancel these when the ring exits, and we cancel all of them.
This is in preparation for killing only the ones associated with a given
task.

Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 fs/io_uring.c | 33 ++++++++++++++++++++++++---------
 1 file changed, 24 insertions(+), 9 deletions(-)
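A note on intent, restating the commit message: the cancelation helpers gain a task argument so that a later change can cancel only one task's requests. A minimal sketch of such a caller follows; the function name is hypothetical and not part of this patch or of the kernel API:

/*
 * Hypothetical per-task cancelation path (illustration only, not in
 * this patch): passing a real task cancels just that task's pending
 * timeouts and poll requests, while passing NULL keeps the existing
 * cancel-everything behavior used at ring teardown.
 */
static void io_cancel_task_requests(struct io_ring_ctx *ctx,
				    struct task_struct *tsk)
{
	io_kill_timeouts(ctx, tsk);
	io_poll_remove_all(ctx, tsk);
}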
diff --git a/fs/io_uring.c b/fs/io_uring.c
index aae0ef2ec34d..867145fb149c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1226,13 +1226,26 @@ static void io_kill_timeout(struct io_kiocb *req)
 	}
 }
 
-static void io_kill_timeouts(struct io_ring_ctx *ctx)
+static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (!tsk || req->task == tsk)
+		return true;
+	if ((ctx->flags & IORING_SETUP_SQPOLL) && req->task == ctx->sqo_thread)
+		return true;
+	return false;
+}
+
+static void io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
 {
 	struct io_kiocb *req, *tmp;
 
 	spin_lock_irq(&ctx->completion_lock);
-	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list)
-		io_kill_timeout(req);
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+		if (io_task_match(req, tsk))
+			io_kill_timeout(req);
+	}
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
@@ -5017,7 +5030,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 	return do_complete;
 }
 
-static void io_poll_remove_all(struct io_ring_ctx *ctx)
+static void io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
 {
 	struct hlist_node *tmp;
 	struct io_kiocb *req;
@@ -5028,8 +5041,10 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
 		struct hlist_head *list;
 
 		list = &ctx->cancel_hash[i];
-		hlist_for_each_entry_safe(req, tmp, list, hash_node)
-			posted += io_poll_remove_one(req);
+		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
+			if (io_task_match(req, tsk))
+				posted += io_poll_remove_one(req);
+		}
 	}
 	spin_unlock_irq(&ctx->completion_lock);
 
@@ -7989,8 +8004,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	percpu_ref_kill(&ctx->refs);
 	mutex_unlock(&ctx->uring_lock);
 
-	io_kill_timeouts(ctx);
-	io_poll_remove_all(ctx);
+	io_kill_timeouts(ctx, NULL);
+	io_poll_remove_all(ctx, NULL);
 
 	if (ctx->io_wq)
 		io_wq_cancel_all(ctx->io_wq);
@@ -8221,7 +8236,7 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct task_struct *task = data;
 
-	return req->task == task;
+	return io_task_match(req, task);
 }
 
 static int io_uring_flush(struct file *file, void *data)
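To summarize the matching rule the patch introduces (this restates the io_task_match() hunk above; it adds no new behavior):

/*
 * io_task_match(req, NULL) -> true for every request, so the existing
 *     callers in io_ring_ctx_wait_and_kill() still cancel everything
 *     at ring teardown.
 * io_task_match(req, tsk)  -> true when req->task == tsk. On an
 *     IORING_SETUP_SQPOLL ring, requests are issued by the SQ poll
 *     thread rather than the submitting task, so requests owned by
 *     ctx->sqo_thread match as well.
 */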