author	Bob Liu <bob.liu@oracle.com>	2019-11-13 18:06:25 +0800
committer	Jens Axboe <axboe@kernel.dk>	2019-11-13 19:41:01 -0700
commit	9d858b21483981db9c0cb4b184d4cdeb4bc525c2 (patch)
tree	c7d396dbc7ad5452ff687dda066d66660f8d150f /fs
parent	2f6d9b9d6357ede64a29437676884ee263039910 (diff)
io_uring: introduce req_need_defer()
Makes the code easier to read.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	19
1 file changed, 10 insertions, 9 deletions
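
Editorial note: the rename comes with a flipped flag test, but the predicate's behaviour is unchanged. A request is a deferral candidate only when REQ_F_IO_DRAIN is set and REQ_F_IO_DRAINED is not; the patch merely states that case positively and lets false be the default. A minimal standalone C sketch of the two forms (the flag values below are made up for illustration, not the real ones from fs/io_uring.c) shows they agree for every input:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag values, for illustration only. */
#define REQ_F_IO_DRAIN		1u
#define REQ_F_IO_DRAINED	2u

/* Old form: bail out early unless the request still needs draining. */
static bool old_style(unsigned int flags, bool seq_says_defer)
{
	if ((flags & (REQ_F_IO_DRAIN | REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
		return false;
	return seq_says_defer;
}

/* New form: state the interesting case positively, default to false. */
static bool new_style(unsigned int flags, bool seq_says_defer)
{
	if ((flags & (REQ_F_IO_DRAIN | REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
		return seq_says_defer;
	return false;
}

int main(void)
{
	/* The two forms agree for every flag combination. */
	for (unsigned int flags = 0; flags < 4; flags++)
		for (int seq = 0; seq <= 1; seq++)
			printf("flags=%u seq=%d: old=%d new=%d\n", flags, seq,
			       old_style(flags, seq), new_style(flags, seq));
	return 0;
}
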
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 297b9e80dc5c..9500780bcaea 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -448,7 +448,7 @@ err:
 	return NULL;
 }
 
-static inline bool __io_sequence_defer(struct io_kiocb *req)
+static inline bool __req_need_defer(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -456,12 +456,12 @@ static inline bool __io_sequence_defer(struct io_kiocb *req)
 			+ atomic_read(&ctx->cached_cq_overflow);
 }
 
-static inline bool io_sequence_defer(struct io_kiocb *req)
+static inline bool req_need_defer(struct io_kiocb *req)
 {
-	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
-		return false;
+	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
+		return __req_need_defer(req);
 
-	return __io_sequence_defer(req);
+	return false;
 }
 
 static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -469,7 +469,7 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
 	struct io_kiocb *req;
 
 	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
-	if (req && !io_sequence_defer(req)) {
+	if (req && !req_need_defer(req)) {
 		list_del_init(&req->list);
 		return req;
 	}
@@ -482,7 +482,7 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 	struct io_kiocb *req;
 
 	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
-	if (req && !__io_sequence_defer(req)) {
+	if (req && !__req_need_defer(req)) {
 		list_del_init(&req->list);
 		return req;
 	}
@@ -2436,7 +2436,8 @@ static int io_req_defer(struct io_kiocb *req)
 	struct io_uring_sqe *sqe_copy;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (!io_sequence_defer(req) && list_empty(&ctx->defer_list))
+	/* Still need defer if there is pending req in defer list. */
+	if (!req_need_defer(req) && list_empty(&ctx->defer_list))
 		return 0;
 
 	sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
@@ -2444,7 +2445,7 @@ static int io_req_defer(struct io_kiocb *req)
 		return -EAGAIN;
 
 	spin_lock_irq(&ctx->completion_lock);
-	if (!io_sequence_defer(req) && list_empty(&ctx->defer_list)) {
+	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
 		spin_unlock_irq(&ctx->completion_lock);
 		kfree(sqe_copy);
 		return 0;
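
Editorial note: the two io_req_defer() hunks also show why the same condition is tested twice. The first, unlocked check skips the sqe_copy allocation in the common case, and the condition is re-checked under ctx->completion_lock before the copy is committed, with the copy freed if deferral is no longer needed. A rough userspace analogy of that check/allocate/re-check pattern, assuming nothing beyond the diff above (all names below are invented; a pthread mutex stands in for the spinlock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t defer_lock = PTHREAD_MUTEX_INITIALIZER;
static int defer_list_len;		/* stands in for ctx->defer_list */

/* Analogue of the patched condition: defer when the request asks for a
 * drain, or when earlier requests are already sitting in the defer list. */
static bool need_defer(bool drain_requested)
{
	return drain_requested || defer_list_len > 0;
}

static int maybe_defer(bool drain_requested)
{
	void *copy;

	/* Unlocked fast path: skip the allocation when nothing is pending. */
	if (!need_defer(drain_requested))
		return 0;

	copy = malloc(64);		/* stands in for the sqe_copy kmalloc */
	if (!copy)
		return -1;

	pthread_mutex_lock(&defer_lock);
	/* Re-check under the lock: the state may have changed meanwhile. */
	if (!need_defer(drain_requested)) {
		pthread_mutex_unlock(&defer_lock);
		free(copy);
		return 0;
	}
	defer_list_len++;		/* commit, like adding to defer_list */
	pthread_mutex_unlock(&defer_lock);
	printf("deferred request, copy at %p\n", copy);
	free(copy);			/* the real code keeps the copy on the request */
	return 0;
}

int main(void)
{
	maybe_defer(false);		/* nothing pending: returns early */
	maybe_defer(true);		/* drain requested: gets deferred */
	return 0;
}
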