Subject: [PATCH 5.10 143/215] io_uring: convert io_buffer_idr to XArray
From: Jens Axboe <axboe@kernel.dk>

commit 9e15c3a0ced5a61f320b989072c24983cb1620c1 upstream.

Like we did for the personality idr, convert the IO buffer idr to use
XArray. This avoids a use-after-free on removal of entries, since idr
doesn't like doing so from inside an iterator, and it nicely reduces
the amount of code we need to support this feature.

Fixes: 5a2e745d4d43 ("io_uring: buffer registration infrastructure")
Cc: stable@vger.kernel.org
Cc: Matthew Wilcox <willy@infradead.org>
Cc: yangerkun <yangerkun@huawei.com>
Reported-by: Hulk Robot <hulkci@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
fs/io_uring.c | 43 +++++++++++++++----------------------------
1 file changed, 15 insertions(+), 28 deletions(-)

--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -344,7 +344,7 @@ struct io_ring_ctx {
struct socket *ring_sock;
#endif

- struct idr io_buffer_idr;
+ struct xarray io_buffers;

struct xarray personalities;
u32 pers_next;
@@ -1212,7 +1212,7 @@ static struct io_ring_ctx *io_ring_ctx_a
INIT_LIST_HEAD(&ctx->cq_overflow_list);
init_completion(&ctx->ref_comp);
init_completion(&ctx->sq_thread_comp);
- idr_init(&ctx->io_buffer_idr);
+ xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->wait);
@@ -2990,7 +2990,7 @@ static struct io_buffer *io_buffer_selec

lockdep_assert_held(&req->ctx->uring_lock);

- head = idr_find(&req->ctx->io_buffer_idr, bgid);
+ head = xa_load(&req->ctx->io_buffers, bgid);
if (head) {
if (!list_empty(&head->list)) {
kbuf = list_last_entry(&head->list, struct io_buffer,
@@ -2998,7 +2998,7 @@ static struct io_buffer *io_buffer_selec
list_del(&kbuf->list);
} else {
kbuf = head;
- idr_remove(&req->ctx->io_buffer_idr, bgid);
+ xa_erase(&req->ctx->io_buffers, bgid);
}
if (*len > kbuf->len)
*len = kbuf->len;
@@ -3960,7 +3960,7 @@ static int __io_remove_buffers(struct io
}
i++;
kfree(buf);
- idr_remove(&ctx->io_buffer_idr, bgid);
+ xa_erase(&ctx->io_buffers, bgid);

return i;
}
@@ -3978,7 +3978,7 @@ static int io_remove_buffers(struct io_k
lockdep_assert_held(&ctx->uring_lock);

ret = -ENOENT;
- head = idr_find(&ctx->io_buffer_idr, p->bgid);
+ head = xa_load(&ctx->io_buffers, p->bgid);
if (head)
ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
if (ret < 0)
@@ -4069,21 +4069,14 @@ static int io_provide_buffers(struct io_

lockdep_assert_held(&ctx->uring_lock);

- list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
+ list = head = xa_load(&ctx->io_buffers, p->bgid);

ret = io_add_buffers(p, &head);
- if (ret < 0)
- goto out;
-
- if (!list) {
- ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
- GFP_KERNEL);
- if (ret < 0) {
+ if (ret >= 0 && !list) {
+ ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
+ if (ret < 0)
__io_remove_buffers(ctx, head, p->bgid, -1U);
- goto out;
- }
}
-out:
if (ret < 0)
req_set_fail_links(req);

@@ -8411,19 +8404,13 @@ static int io_eventfd_unregister(struct
return -ENXIO;
}

-static int __io_destroy_buffers(int id, void *p, void *data)
-{
- struct io_ring_ctx *ctx = data;
- struct io_buffer *buf = p;
-
- __io_remove_buffers(ctx, buf, id, -1U);
- return 0;
-}
-
static void io_destroy_buffers(struct io_ring_ctx *ctx)
{
- idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
- idr_destroy(&ctx->io_buffer_idr);
+ struct io_buffer *buf;
+ unsigned long index;
+
+ xa_for_each(&ctx->io_buffers, index, buf)
+ __io_remove_buffers(ctx, buf, index, -1U);
}

static void io_ring_ctx_free(struct io_ring_ctx *ctx)
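
For readers less familiar with the XArray API, here is a minimal,
self-contained sketch of the calls the conversion relies on. The
struct demo_buf type, the demo_* names, and the group-ID parameter are
invented for illustration; only the xa_*() calls, XA_FLAGS_ALLOC1, and
the idr-to-XArray mapping come from the patch itself.

#include <linux/xarray.h>
#include <linux/slab.h>

struct demo_buf {
	unsigned int len;
};

/* Static equivalent of xa_init_flags(&xa, XA_FLAGS_ALLOC1). */
static DEFINE_XARRAY_ALLOC1(demo_buffers);

static int demo_add_group(unsigned long bgid, struct demo_buf *head)
{
	/*
	 * xa_insert() stores head at index bgid and fails with -EBUSY
	 * if that index is already populated -- replacing the old
	 * idr_alloc(idr, head, bgid, bgid + 1, GFP_KERNEL) idiom.
	 */
	return xa_insert(&demo_buffers, bgid, head, GFP_KERNEL);
}

static struct demo_buf *demo_find_group(unsigned long bgid)
{
	/* Direct replacement for idr_find(). */
	return xa_load(&demo_buffers, bgid);
}

static void demo_destroy_all(void)
{
	struct demo_buf *head;
	unsigned long index;

	/*
	 * Unlike idr_for_each(), which requires a callback and, per the
	 * commit message, does not like entries being removed from
	 * inside the iteration, xa_for_each() tolerates xa_erase() on
	 * the entry currently being visited.
	 */
	xa_for_each(&demo_buffers, index, head) {
		xa_erase(&demo_buffers, index);
		kfree(head);
	}
}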
