diff options
-rw-r--r-- | fs/io_uring.c | 21 |
1 file changed, 6 insertions, 15 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c index a170488507cf..30d88f3d42b0 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1995,7 +1995,7 @@ static struct io_kiocb *io_req_find_next(struct io_kiocb *req) return __io_req_find_next(req); } -static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok) +static int io_req_task_work_add(struct io_kiocb *req) { struct task_struct *tsk = req->task; struct io_ring_ctx *ctx = req->ctx; @@ -2012,7 +2012,7 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok) * will do the job. */ notify = TWA_NONE; - if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok) + if (!(ctx->flags & IORING_SETUP_SQPOLL)) notify = TWA_SIGNAL; ret = task_work_add(tsk, &req->task_work, notify); @@ -2074,7 +2074,7 @@ static void io_req_task_queue(struct io_kiocb *req) init_task_work(&req->task_work, io_req_task_submit); percpu_ref_get(&req->ctx->refs); - ret = io_req_task_work_add(req, true); + ret = io_req_task_work_add(req); if (unlikely(ret)) { struct task_struct *tsk; @@ -2196,7 +2196,7 @@ static void io_free_req_deferred(struct io_kiocb *req) int ret; init_task_work(&req->task_work, io_put_req_deferred_cb); - ret = io_req_task_work_add(req, true); + ret = io_req_task_work_add(req); if (unlikely(ret)) { struct task_struct *tsk; @@ -3305,7 +3305,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, /* submit ref gets dropped, acquire a new one */ refcount_inc(&req->refs); - ret = io_req_task_work_add(req, true); + ret = io_req_task_work_add(req); if (unlikely(ret)) { struct task_struct *tsk; @@ -4843,7 +4843,6 @@ struct io_poll_table { static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, __poll_t mask, task_work_func_t func) { - bool twa_signal_ok; int ret; /* for instances that support it check for an event match first: */ @@ -4859,20 +4858,12 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, percpu_ref_get(&req->ctx->refs); /* - * 
If we using the signalfd wait_queue_head for this wakeup, then - * it's not safe to use TWA_SIGNAL as we could be recursing on the - * tsk->sighand->siglock on doing the wakeup. Should not be needed - * either, as the normal wakeup will suffice. - */ - twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh); - - /* * If this fails, then the task is exiting. When a task exits, the * work gets canceled, so just cancel this request as well instead * of executing it. We can't safely execute it anyway, as we may not * have the needed state needed for it anyway. */ - ret = io_req_task_work_add(req, twa_signal_ok); + ret = io_req_task_work_add(req); if (unlikely(ret)) { struct task_struct *tsk; |