From: Christian Schoenebeck <linux_oss@crudebyte.com>
Date: 15 Jul 2022
Subject: [PATCH v6 06/11] 9p/trans_virtio: resize sg lists to whatever is possible
Right now vq_sg_resize() uses a lazy implementation following
the all-or-nothing principle: it either resizes exactly to the
requested new amount of sg lists, or it does not resize at all.

The problem with this is that if a user supplies a very large
msize value, the resize would simply fail and the user would be
stuck with the default maximum msize supported by the virtio
transport.

To resolve this potential issue, change vq_sg_resize() to resize
the passed sg list to whatever is possible on the machine.

Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
---

As previously discussed in v5, this patch could probably be dropped.
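
For reviewers who want the idea without reading the diff: below is a
minimal user-space sketch (plain C99) of the "take whatever we get"
growth pattern this patch applies to the chained scatter/gather lists.
All names in it (block_set, grow_blocks, BLOCK_ENTRIES) are
illustrative stand-ins, not kernel identifiers.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_ENTRIES 128 /* stand-in for SG_MAX_SINGLE_ALLOC */

struct block_set {
	unsigned int nblocks;
	int *blocks[]; /* flexible array member, like sgl[] in the patch */
};

/*
 * Grow *set to nblocks blocks while retaining the old blocks. Returns 0
 * on full success; on allocation failure it keeps what it got so far
 * and returns -ENOMEM, mirroring vq_sg_resize()'s soft-error handling.
 */
static int grow_blocks(struct block_set **set, unsigned int nblocks)
{
	struct block_set *grown;
	unsigned int i;
	int ret = 0;

	if ((*set)->nblocks >= nblocks)
		return 0;

	grown = calloc(1, sizeof(*grown) + nblocks * sizeof(int *));
	if (!grown)
		return -ENOMEM;

	/* copy over the pointers to the previously allocated blocks */
	memcpy(grown->blocks, (*set)->blocks,
	       (*set)->nblocks * sizeof(int *));
	grown->nblocks = nblocks;

	for (i = (*set)->nblocks; i < nblocks; ++i) {
		grown->blocks[i] = calloc(BLOCK_ENTRIES, sizeof(int));
		if (!grown->blocks[i]) {
			/* soft error: settle for the i blocks we got */
			ret = -ENOMEM;
			grown->nblocks = i;
			break;
		}
	}

	free(*set);
	*set = grown;
	return ret;
}

int main(void)
{
	struct block_set *set;

	set = calloc(1, sizeof(*set) + 1 * sizeof(int *));
	if (!set)
		return 1;
	set->blocks[0] = calloc(BLOCK_ENTRIES, sizeof(int));
	if (!set->blocks[0])
		return 1;
	set->nblocks = 1;

	/* request 4 blocks; on ENOMEM we silently keep a smaller set */
	(void)grow_blocks(&set, 4);
	return 0;
}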

net/9p/trans_virtio.c | 76 +++++++++++++++++++++++++++++++++++--------
1 file changed, 63 insertions(+), 13 deletions(-)
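
To make the msize arithmetic in the second hunk concrete, here is a
hedged back-of-envelope version of the clamp_t() computation. The
constants below (4096-byte pages, 127 usable pages per sg list, a
1024-descriptor limit) are example assumptions, not the series' actual
definitions of PAGE_SIZE, SG_USER_PAGES_PER_LIST and
VIRTIO_MAX_DESCRIPTORS.

#include <stdio.h>

#define EX_PAGE_SIZE        4096 /* assumed page size */
#define EX_PAGES_PER_LIST    127 /* assumed SG_USER_PAGES_PER_LIST */
#define EX_MAX_DESCRIPTORS  1024 /* assumed VIRTIO_MAX_DESCRIPTORS */

/* mirrors: PAGE_SIZE * clamp_t(int, nsgl * pages - 2, 0, max - 2);
 * the "- 2" reserves one page each for the 9p request and reply that
 * must both fit into the virtio round-trip message */
static long example_msize(unsigned int nsgl)
{
	long pages = (long)nsgl * EX_PAGES_PER_LIST - 2;

	if (pages < 0)
		pages = 0;
	if (pages > EX_MAX_DESCRIPTORS - 2)
		pages = EX_MAX_DESCRIPTORS - 2;
	return pages * EX_PAGE_SIZE;
}

int main(void)
{
	/* 3 lists: (3 * 127 - 2) = 379 pages -> 379 * 4096 ~= 1.5 MiB */
	printf("msize for 3 lists: %ld bytes\n", example_msize(3));
	/* 16 lists would exceed the descriptor limit and get clamped */
	printf("msize for 16 lists: %ld bytes\n", example_msize(16));
	return 0;
}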

diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 921caa022570..52d00cb3c105 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -218,24 +218,67 @@ static struct virtqueue_sg *vq_sg_alloc(unsigned int nsgl)
  * amount of lists
  * @_vq_sg: scatter/gather lists to be resized
  * @nsgl: new amount of scatter/gather lists
+ *
+ * Old scatter/gather lists are retained. Only growing the size is supported.
+ * If the requested amount cannot be satisfied, then lists are increased to
+ * whatever is possible.
  */
 static int vq_sg_resize(struct virtqueue_sg **_vq_sg, unsigned int nsgl)
 {
 	struct virtqueue_sg *vq_sg;
+	unsigned int i;
+	size_t sz;
+	int ret = 0;
 
 	BUG_ON(!_vq_sg || !nsgl);
 	vq_sg = *_vq_sg;
+	if (nsgl > VIRTQUEUE_SG_NSGL_MAX)
+		nsgl = VIRTQUEUE_SG_NSGL_MAX;
 	if (vq_sg->nsgl == nsgl)
 		return 0;
+	if (vq_sg->nsgl > nsgl)
+		return -ENOTSUPP;
+
+	vq_sg = kzalloc(sizeof(struct virtqueue_sg) +
+			nsgl * sizeof(struct scatterlist *),
+			GFP_KERNEL);
 
-	/* lazy resize implementation for now */
-	vq_sg = vq_sg_alloc(nsgl);
 	if (!vq_sg)
 		return -ENOMEM;
 
+	/* copy over old scatter gather lists */
+	sz = sizeof(struct virtqueue_sg) +
+		(*_vq_sg)->nsgl * sizeof(struct scatterlist *);
+	memcpy(vq_sg, *_vq_sg, sz);
+
+	vq_sg->nsgl = nsgl;
+
+	for (i = (*_vq_sg)->nsgl; i < nsgl; ++i) {
+		vq_sg->sgl[i] = kmalloc_array(
+			SG_MAX_SINGLE_ALLOC, sizeof(struct scatterlist),
+			GFP_KERNEL
+		);
+		/*
+		 * handle failed allocation as soft error, we take whatever
+		 * we get
+		 */
+		if (!vq_sg->sgl[i]) {
+			ret = -ENOMEM;
+			vq_sg->nsgl = nsgl = i;
+			break;
+		}
+		sg_init_table(vq_sg->sgl[i], SG_MAX_SINGLE_ALLOC);
+		if (i) {
+			/* chain the lists */
+			sg_chain(vq_sg->sgl[i - 1], SG_MAX_SINGLE_ALLOC,
+				 vq_sg->sgl[i]);
+		}
+	}
+	sg_mark_end(&vq_sg->sgl[nsgl - 1][SG_MAX_SINGLE_ALLOC - 1]);
+
 	kfree(*_vq_sg);
 	*_vq_sg = vq_sg;
-	return 0;
+	return ret;
 }

/**
@@ -860,16 +903,23 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
 		 * if resize fails, no big deal, then just continue with
 		 * whatever we got
 		 */
-		if (!vq_sg_resize(&chan->vq_sg, nsgl)) {
-			/*
-			 * decrement 2 pages as both 9p request and 9p reply have
-			 * to fit into the virtio round-trip message
-			 */
-			client->trans_maxsize =
-				PAGE_SIZE *
-				clamp_t(int,
-					(nsgl * SG_USER_PAGES_PER_LIST) - 2,
-					0, VIRTIO_MAX_DESCRIPTORS - 2);
+		vq_sg_resize(&chan->vq_sg, nsgl);
+		/*
+		 * actual allocation size might be less than requested, so use
+		 * vq_sg->nsgl instead of nsgl, and decrement 2 pages as both
+		 * 9p request and 9p reply have to fit into the virtio
+		 * round-trip message
+		 */
+		client->trans_maxsize =
+			PAGE_SIZE *
+			clamp_t(int,
+				(chan->vq_sg->nsgl * SG_USER_PAGES_PER_LIST) - 2,
+				0, VIRTIO_MAX_DESCRIPTORS - 2);
+		if (nsgl > chan->vq_sg->nsgl) {
+			pr_info("limiting 'msize' to %d as only %d "
+				"of %zu SG lists could be allocated",
+				client->trans_maxsize,
+				chan->vq_sg->nsgl, nsgl);
 		}
 	}
 #endif /* CONFIG_ARCH_NO_SG_CHAIN */
--
2.30.2