author    Pavel Begunkov <asml.silence@gmail.com>  2021-02-10 00:03:08 +0000
committer Jens Axboe <axboe@kernel.dk>             2021-02-09 19:15:14 -0700
commit    61e98203047983fd959cfef889b328a57315847c (patch)
tree      cb78d5969d3d4c7fa42e193ffbba77f83092139f /fs
parent    45d189c6062922ffe272e98013ba464b355dede7 (diff)
io_uring: make op handlers always take issue flags
Make opcode handler interfaces a bit more consistent by always passing in issue flags. Bulky, but a pretty easy and mechanical change.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
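To make the shape of the change concrete, here is a minimal, self-contained C sketch of the pattern (illustrative only; struct request, op_handler, handle_nop, handle_timeout, and the OP_* opcodes are stand-ins, not the io_uring definitions). Once every handler shares the (req, issue_flags) signature, the dispatcher can treat the handlers uniformly, e.g. through a single function-pointer type. The commit itself keeps the switch statement in io_issue_sqe(); the table below just shows one payoff of a uniform signature.

#include <stdio.h>

struct request { int opcode; };

/* One signature for every opcode handler: request plus issue flags. */
typedef int (*op_handler)(struct request *req, unsigned int issue_flags);

static int handle_nop(struct request *req, unsigned int issue_flags)
{
	(void)req;
	(void)issue_flags;
	return 0;	/* nothing to do: just report success */
}

static int handle_timeout(struct request *req, unsigned int issue_flags)
{
	(void)req;
	printf("timeout issued, flags=%#x\n", issue_flags);
	return 0;
}

enum { OP_NOP, OP_TIMEOUT, OP_LAST };

/* Uniform signatures let all handlers live in one dispatch table. */
static const op_handler handlers[OP_LAST] = {
	[OP_NOP]     = handle_nop,
	[OP_TIMEOUT] = handle_timeout,
};

static int issue(struct request *req, unsigned int issue_flags)
{
	return handlers[req->opcode](req, issue_flags);
}

int main(void)
{
	struct request req = { .opcode = OP_TIMEOUT };
	return issue(&req, 0x1);
}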
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c  25
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 862121c48cee..ac233d04ee71 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3917,7 +3917,8 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
 /*
  * IORING_OP_NOP just posts a completion event, nothing else.
  */
-static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
+static int io_nop(struct io_kiocb *req, unsigned int issue_flags,
+		  struct io_comp_state *cs)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -5581,7 +5582,7 @@ static int io_poll_remove_prep(struct io_kiocb *req,
  * Find a running poll command that matches one specified in sqe->addr,
  * and remove it if found.
  */
-static int io_poll_remove(struct io_kiocb *req)
+static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
@@ -5632,7 +5633,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	return 0;
 }
 
-static int io_poll_add(struct io_kiocb *req)
+static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_poll_iocb *poll = &req->poll;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -5772,7 +5773,7 @@ static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
 /*
  * Remove or update an existing timeout command
  */
-static int io_timeout_remove(struct io_kiocb *req)
+static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_timeout_rem *tr = &req->timeout_rem;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -5828,7 +5829,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return 0;
 }
 
-static int io_timeout(struct io_kiocb *req)
+static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_timeout_data *data = req->async_data;
@@ -5951,7 +5952,7 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 	return 0;
 }
 
-static int io_async_cancel(struct io_kiocb *req)
+static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -6211,7 +6212,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags,
 
 	switch (req->opcode) {
 	case IORING_OP_NOP:
-		ret = io_nop(req, cs);
+		ret = io_nop(req, issue_flags, cs);
 		break;
 	case IORING_OP_READV:
 	case IORING_OP_READ_FIXED:
@@ -6227,10 +6228,10 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags,
 		ret = io_fsync(req, issue_flags);
 		break;
 	case IORING_OP_POLL_ADD:
-		ret = io_poll_add(req);
+		ret = io_poll_add(req, issue_flags);
 		break;
 	case IORING_OP_POLL_REMOVE:
-		ret = io_poll_remove(req);
+		ret = io_poll_remove(req, issue_flags);
 		break;
 	case IORING_OP_SYNC_FILE_RANGE:
 		ret = io_sync_file_range(req, issue_flags);
@@ -6248,10 +6249,10 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags,
 		ret = io_recv(req, issue_flags, cs);
 		break;
 	case IORING_OP_TIMEOUT:
-		ret = io_timeout(req);
+		ret = io_timeout(req, issue_flags);
 		break;
 	case IORING_OP_TIMEOUT_REMOVE:
-		ret = io_timeout_remove(req);
+		ret = io_timeout_remove(req, issue_flags);
 		break;
 	case IORING_OP_ACCEPT:
 		ret = io_accept(req, issue_flags, cs);
@@ -6260,7 +6261,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags,
 		ret = io_connect(req, issue_flags, cs);
 		break;
 	case IORING_OP_ASYNC_CANCEL:
-		ret = io_async_cancel(req);
+		ret = io_async_cancel(req, issue_flags);
 		break;
 	case IORING_OP_FALLOCATE:
 		ret = io_fallocate(req, issue_flags);