Subject: Re: [PATCH 9/9] virtio_ring: validate used buffer length
On Mon, Sep 13, 2021 at 01:53:53PM +0800, Jason Wang wrote:
> This patch validates the used buffer length provided by the device
> before trying to use it. This is done by recording the in-buffer
> length in a new field of the desc_state structure during
> virtqueue_add(), so that virtqueue_get_buf() can fail when the device
> reports a used buffer length greater than the in-buffer length.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>

Hmm, this was proposed in the past. The overhead here is not
negligible, so I'd like to know more:
when is it a problem if the used len is too big?
Don't the affected drivers already track the length somewhere,
and so could validate it without the extra cost in the
virtio core?
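
For example (just a sketch of the idea, not taken from any in-tree
driver), a receive path that already remembers how large a buffer it
posted can do the clamping itself:

#include <linux/virtio.h>

/*
 * Hypothetical driver-side check: the driver knows the size of the
 * buffer it posted, so it can reject a used length that exceeds it
 * without any help from the virtio core.
 */
struct my_rx_buf {
	void *addr;
	unsigned int size;	/* length passed to virtqueue_add_inbuf() */
};

static void my_handle_rx(struct virtqueue *vq)
{
	struct my_rx_buf *buf;
	unsigned int len;

	while ((buf = virtqueue_get_buf(vq, &len))) {
		if (unlikely(len > buf->size)) {
			dev_warn(&vq->vdev->dev,
				 "used len %u > posted len %u\n",
				 len, buf->size);
			continue;	/* drop the bogus buffer */
		}
		/* ... consume len bytes at buf->addr ... */
	}
}

That way only the drivers that actually need the check pay for it.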

> ---
> drivers/virtio/virtio_ring.c | 23 +++++++++++++++++++++++
> 1 file changed, 23 insertions(+)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index d2ca0a7365f8..b8374a6144f3 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -69,6 +69,7 @@
> struct vring_desc_state_split {
> void *data; /* Data for callback. */
> struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
> + u64 buflen; /* In buffer length */
> };
>
> struct vring_desc_state_packed {
> @@ -76,6 +77,7 @@ struct vring_desc_state_packed {
> struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
> u16 num; /* Descriptor list length. */
> u16 last; /* The last desc state in a list. */
> + u64 buflen; /* In buffer length */
> };
>
> struct vring_desc_extra {
> @@ -490,6 +492,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> unsigned int i, n, avail, descs_used, prev, err_idx;
> int head;
> bool indirect;
> + u64 buflen = 0;
>
> START_USE(vq);
>
> @@ -571,6 +574,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> VRING_DESC_F_NEXT |
> VRING_DESC_F_WRITE,
> indirect);
> + buflen += sg->length;
> }
> }
> /* Last one doesn't continue. */
> @@ -605,6 +609,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>
> /* Store token and indirect buffer state. */
> vq->split.desc_state[head].data = data;
> + vq->split.desc_state[head].buflen = buflen;
> if (indirect)
> vq->split.desc_state[head].indir_desc = desc;
> else
> @@ -784,6 +789,11 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
> BAD_RING(vq, "id %u is not a head!\n", i);
> return NULL;
> }
> + if (unlikely(*len > vq->split.desc_state[i].buflen)) {
> + BAD_RING(vq, "used len %d is larger than in buflen %lld\n",
> + *len, vq->split.desc_state[i].buflen);
> + return NULL;
> + }
>
> /* detach_buf_split clears data, so grab it now. */
> ret = vq->split.desc_state[i].data;
> @@ -1062,6 +1072,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
> unsigned int i, n, err_idx;
> u16 head, id;
> dma_addr_t addr;
> + u64 buflen = 0;
>
> head = vq->packed.next_avail_idx;
> desc = alloc_indirect_packed(total_sg, gfp);
> @@ -1089,6 +1100,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
> desc[i].addr = cpu_to_le64(addr);
> desc[i].len = cpu_to_le32(sg->length);
> i++;
> + if (n >= out_sgs)
> + buflen += sg->length;
> }
> }
>
> @@ -1141,6 +1154,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
> vq->packed.desc_state[id].data = data;
> vq->packed.desc_state[id].indir_desc = desc;
> vq->packed.desc_state[id].last = id;
> + vq->packed.desc_state[id].buflen = buflen;
>
> vq->num_added += 1;
>
> @@ -1176,6 +1190,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
> unsigned int i, n, c, descs_used, err_idx;
> __le16 head_flags, flags;
> u16 head, id, prev, curr, avail_used_flags;
> + u64 buflen = 0;
>
> START_USE(vq);
>
> @@ -1250,6 +1265,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
> 1 << VRING_PACKED_DESC_F_AVAIL |
> 1 << VRING_PACKED_DESC_F_USED;
> }
> + if (n >= out_sgs)
> + buflen += sg->length;
> }
> }
>
> @@ -1268,6 +1285,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
> vq->packed.desc_state[id].data = data;
> vq->packed.desc_state[id].indir_desc = ctx;
> vq->packed.desc_state[id].last = prev;
> + vq->packed.desc_state[id].buflen = buflen;
>
> /*
> * A driver MUST NOT make the first descriptor in the list
> @@ -1455,6 +1473,11 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
> BAD_RING(vq, "id %u is not a head!\n", id);
> return NULL;
> }
> + if (unlikely(*len > vq->packed.desc_state[id].buflen)) {
> + BAD_RING(vq, "used len %d is larger than in buflen %lld\n",
> + *len, vq->packed.desc_state[id].buflen);
> + return NULL;
> + }
>
> /* detach_buf_packed clears data, so grab it now. */
> ret = vq->packed.desc_state[id].data;
> --
> 2.25.1
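
To restate the mechanism the commit message describes, stripped of the
split/packed details (a simplified sketch, not the actual desc_state
layout in virtio_ring.c): the add path sums the lengths of the
device-writable scatterlist entries and stores the total next to the
token, and the get path refuses any used length above that total.

#include <linux/scatterlist.h>
#include <linux/types.h>

/* Simplified per-buffer state; the real structs also carry
 * indirect-descriptor bookkeeping. */
struct desc_state_sketch {
	void *data;	/* driver token */
	u64 in_len;	/* sum of device-writable sg lengths at add time */
};

/* Add time: only the in_sgs (device-writable) entries count. */
static u64 sum_in_len(struct scatterlist *sgs[], unsigned int out_sgs,
		      unsigned int in_sgs)
{
	struct scatterlist *sg;
	u64 len = 0;
	unsigned int n;

	for (n = out_sgs; n < out_sgs + in_sgs; n++)
		for (sg = sgs[n]; sg; sg = sg_next(sg))
			len += sg->length;
	return len;
}

/* Get time: a used length above the recorded total is bogus. */
static bool used_len_ok(const struct desc_state_sketch *st,
			unsigned int used_len)
{
	return used_len <= st->in_len;
}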
