Mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux (synced 2025-09-26 18:38:20 +10:00)
io_uring: add struct io_cold_def->sqe_copy() method
Will be called by the core of io_uring, if inline issue is not going to be tried for a request. Opcodes can define this handler to defer copying of SQE data that should remain stable.

Only called if IO_URING_F_INLINE is set. If it isn't set, then there's a bug in the core handling of this, and -EFAULT will be returned instead to terminate the request. This will trigger a WARN_ON_ONCE(). Don't expect this to ever trigger, and down the line this can be removed.

Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
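As an illustration of the intended use (not part of this patch): an opcode whose prep stage keeps a pointer into the submission queue could implement the hook roughly as sketched below. The names io_foo_sqe_copy and struct io_foo_async are hypothetical; conversions of real opcodes are expected in follow-up patches.

/*
 * Sketch of an opcode-side ->sqe_copy() handler (hypothetical names, not
 * part of this patch). The core invokes it through io_cold_def->sqe_copy()
 * before the request leaves the inline issue path, so any SQE data the
 * opcode still points at is copied into memory owned by the request.
 */
struct io_foo_async {
	const struct io_uring_sqe *sqe;	/* may still point into the SQ ring */
	struct io_uring_sqe sqe_copy;	/* stable copy owned by the request */
};

static void io_foo_sqe_copy(struct io_kiocb *req)
{
	struct io_foo_async *fa = req->async_data;

	/* nothing to do if we already point at our own copy */
	if (fa->sqe == &fa->sqe_copy)
		return;
	memcpy(&fa->sqe_copy, fa->sqe, sizeof(fa->sqe_copy));
	fa->sqe = &fa->sqe_copy;
}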
This commit is contained in:
parent 4d811e395b
commit af19388a97
include/linux/io_uring_types.h
@@ -504,6 +504,7 @@ enum {
 	REQ_F_BUF_NODE_BIT,
 	REQ_F_HAS_METADATA_BIT,
 	REQ_F_IMPORT_BUFFER_BIT,
+	REQ_F_SQE_COPIED_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -593,6 +594,8 @@ enum {
 	 * For SEND_ZC, whether to import buffers (i.e. the first issue).
 	 */
 	REQ_F_IMPORT_BUFFER	= IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
+	/* ->sqe_copy() has been called, if necessary */
+	REQ_F_SQE_COPIED	= IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, io_tw_token_t tw);
io_uring/io_uring.c
@@ -1938,14 +1938,34 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 	return file;
 }
 
-static void io_queue_async(struct io_kiocb *req, int ret)
+static int io_req_sqe_copy(struct io_kiocb *req, unsigned int issue_flags)
+{
+	const struct io_cold_def *def = &io_cold_defs[req->opcode];
+
+	if (req->flags & REQ_F_SQE_COPIED)
+		return 0;
+	req->flags |= REQ_F_SQE_COPIED;
+	if (!def->sqe_copy)
+		return 0;
+	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_INLINE)))
+		return -EFAULT;
+	def->sqe_copy(req);
+	return 0;
+}
+
+static void io_queue_async(struct io_kiocb *req, unsigned int issue_flags, int ret)
 	__must_hold(&req->ctx->uring_lock)
 {
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
+fail:
 		io_req_defer_failed(req, ret);
 		return;
 	}
 
+	ret = io_req_sqe_copy(req, issue_flags);
+	if (unlikely(ret))
+		goto fail;
+
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
 		io_kbuf_recycle(req, 0);
@@ -1974,7 +1994,7 @@ static inline void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags)
 	 * doesn't support non-blocking read/write attempts
 	 */
 	if (unlikely(ret))
-		io_queue_async(req, ret);
+		io_queue_async(req, issue_flags, ret);
 }
 
 static void io_queue_sqe_fallback(struct io_kiocb *req)
@@ -1989,6 +2009,8 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
 		req->flags |= REQ_F_LINK;
 		io_req_defer_failed(req, req->cqe.res);
 	} else {
+		/* can't fail with IO_URING_F_INLINE */
+		io_req_sqe_copy(req, IO_URING_F_INLINE);
 		if (unlikely(req->ctx->drain_active))
 			io_drain_req(req);
 		else
@@ -2200,6 +2222,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	 */
 	if (unlikely(link->head)) {
 		trace_io_uring_link(req, link->last);
+		io_req_sqe_copy(req, IO_URING_F_INLINE);
 		link->last->link = req;
 		link->last = req;
 
io_uring/opdef.h
@@ -38,6 +38,7 @@ struct io_issue_def {
 struct io_cold_def {
 	const char *name;
 
+	void (*sqe_copy)(struct io_kiocb *);
 	void (*cleanup)(struct io_kiocb *);
 	void (*fail)(struct io_kiocb *);
 };
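For completeness, an opcode providing such a handler would wire it into its entry in the io_cold_defs[] table in io_uring/opdef.c, roughly as sketched below. IORING_OP_FOO and io_foo_sqe_copy are illustrative names only; this patch adds just the method and the core plumbing, without converting any opcode.

	/* hypothetical io_cold_defs[] entry; no opcode is converted here */
	[IORING_OP_FOO] = {
		.name		= "FOO",
		.sqe_copy	= io_foo_sqe_copy,
	},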