Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io_uring.h  |  10
-rw-r--r--  io_uring/rsrc.c      |   3
-rw-r--r--  io_uring/rw.c        |  25
3 files changed, 33 insertions, 5 deletions
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 9d70b2cf7b1e..70b6675941ff 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -284,7 +284,14 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
 {
 	struct io_rings *r = ctx->rings;
 
-	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
+	/*
+	 * SQPOLL must use the actual sqring head, as using the cached_sq_head
+	 * is race prone if the SQPOLL thread has grabbed entries but not yet
+	 * committed them to the ring. For !SQPOLL, this doesn't matter, but
+	 * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
+	 * just read the actual sqring head unconditionally.
+	 */
+	return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
 }
 
 static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
@@ -320,6 +327,7 @@ static inline int io_run_task_work(void)
 	if (current->io_uring) {
 		unsigned int count = 0;
 
+		__set_current_state(TASK_RUNNING);
 		tctx_task_work_run(current->io_uring, UINT_MAX, &count);
 		if (count)
 			ret = true;
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 33a3d156a85b..6f3b6de230bd 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1176,7 +1176,8 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
 	for (i = 0; i < nbufs; i++) {
 		struct io_mapped_ubuf *src = src_ctx->user_bufs[i];
 
-		refcount_inc(&src->refs);
+		if (src != &dummy_ubuf)
+			refcount_inc(&src->refs);
 		user_bufs[i] = src;
 	}
 
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 80ae3c2ebb70..155938f10093 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -807,7 +807,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
 	 * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
 	 */
 	if (kiocb->ki_flags & IOCB_NOWAIT ||
-	    ((file->f_flags & O_NONBLOCK && (req->flags & REQ_F_SUPPORT_NOWAIT))))
+	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
 		req->flags |= REQ_F_NOWAIT;
 
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
@@ -1014,6 +1014,25 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 	return IOU_OK;
 }
 
+static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
+{
+	struct inode *inode;
+	bool ret;
+
+	if (!(req->flags & REQ_F_ISREG))
+		return true;
+	if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
+		kiocb_start_write(kiocb);
+		return true;
+	}
+
+	inode = file_inode(kiocb->ki_filp);
+	ret = sb_start_write_trylock(inode->i_sb);
+	if (ret)
+		__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
+	return ret;
+}
+
 int io_write(struct io_kiocb *req, unsigned int issue_flags)
 {
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
@@ -1051,8 +1070,8 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	if (unlikely(ret))
 		return ret;
 
-	if (req->flags & REQ_F_ISREG)
-		kiocb_start_write(kiocb);
+	if (unlikely(!io_kiocb_start_write(req, kiocb)))
+		return -EAGAIN;
 	kiocb->ki_flags |= IOCB_WRITE;
 
 	if (likely(req->file->f_op->write_iter))
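
The io_sqring_full() hunk replaces ctx->cached_sq_head with a read of the shared sq.head. As a rough illustration of the race described in the added comment, here is a stand-alone user-space sketch (mock struct, field names, and values invented for illustration, not kernel code): once the polling side has privately grabbed entries that are not yet committed to the shared head, a fullness check based on the private copy disagrees with what the submitter can actually observe.

#include <stdbool.h>
#include <stdio.h>

struct mock_sqring {
	unsigned int head;	/* committed consumer index, visible to the submitter */
	unsigned int tail;	/* producer index */
	unsigned int entries;	/* ring size */
};

/* Racy variant: trusts a private head that may have run ahead of the ring. */
static bool sqring_full_cached(const struct mock_sqring *r, unsigned int cached_head)
{
	return r->tail - cached_head == r->entries;
}

/* Fixed variant: always consults the committed, shared head. */
static bool sqring_full_shared(const struct mock_sqring *r)
{
	return r->tail - r->head == r->entries;
}

int main(void)
{
	/* Ring of 8 entries, fully populated by the submitter. */
	struct mock_sqring r = { .head = 0, .tail = 8, .entries = 8 };

	/*
	 * The polling thread has privately grabbed 4 entries but has not yet
	 * committed that progress back to the shared head.
	 */
	unsigned int cached_head = 4;

	printf("cached view full: %d\n", sqring_full_cached(&r, cached_head)); /* 0: claims space */
	printf("shared view full: %d\n", sqring_full_shared(&r));              /* 1: submitter still sees full */
	return 0;
}

Reading the shared head makes the kernel's answer agree with the submitter's view of the ring, so a waiter is not woken (and POLLOUT not signalled) for space that has not actually been freed yet.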
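
The io_clone_buffers() hunk skips refcount_inc() for the sentinel entry that marks sparse slots in the registered buffer table. A minimal user-space sketch of that pattern with invented mock types (dummy_ubuf here is a stand-in for the kernel's static sentinel, not its definition):

#include <stdatomic.h>
#include <stdio.h>

struct mock_ubuf {
	atomic_int refs;
};

/* Static sentinel marking an empty/sparse slot; never freed, never refcounted. */
static struct mock_ubuf dummy_ubuf;

static void clone_bufs(struct mock_ubuf **dst, struct mock_ubuf **src, int nbufs)
{
	for (int i = 0; i < nbufs; i++) {
		/* Only take a reference on real buffers, not on the sentinel. */
		if (src[i] != &dummy_ubuf)
			atomic_fetch_add(&src[i]->refs, 1);
		dst[i] = src[i];
	}
}

int main(void)
{
	struct mock_ubuf real = { .refs = 1 };
	struct mock_ubuf *src[2] = { &real, &dummy_ubuf };
	struct mock_ubuf *dst[2];

	clone_bufs(dst, src, 2);
	printf("real buffer refs: %d, sentinel refs: %d\n",
	       atomic_load(&real.refs), atomic_load(&dummy_ubuf.refs));
	return 0;
}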
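
The new io_kiocb_start_write() helper takes superblock freeze protection without sleeping when IOCB_NOWAIT is set, so a frozen filesystem turns into -EAGAIN instead of a blocked nowait submission. As a loose user-space analogy (a pthread rwlock stands in for the freeze protection; none of the kernel APIs named in the patch are used here):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the superblock's write-freeze protection. */
static pthread_rwlock_t freeze_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Blocking path: analogous to kiocb_start_write() for !IOCB_NOWAIT. */
static void start_write_blocking(void)
{
	pthread_rwlock_rdlock(&freeze_lock);
}

/* Non-blocking path: analogous to the sb_start_write_trylock() branch. */
static bool start_write_nowait(void)
{
	return pthread_rwlock_tryrdlock(&freeze_lock) == 0;
}

int main(void)
{
	/* Simulate a frozen filesystem: a "freezer" holds the write lock. */
	pthread_rwlock_wrlock(&freeze_lock);

	if (!start_write_nowait())
		puts("nowait write: would return -EAGAIN instead of blocking");

	pthread_rwlock_unlock(&freeze_lock);

	/* Once unfrozen, the blocking path proceeds normally. */
	start_write_blocking();
	puts("blocking write: freeze protection acquired");
	pthread_rwlock_unlock(&freeze_lock);
	return 0;
}

On the trylock failure the caller punts, matching the io_write() change where a false return from the helper short-circuits the submission with -EAGAIN.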