Subject: Re: [PATCH RFC 12/12] vdpa_sim_blk: implement ramdisk behaviour
On Mon, Nov 16, 2020 at 01:25:31PM +0800, Jason Wang wrote:
>
>On 2020/11/13 9:47 PM, Stefano Garzarella wrote:
>>The previous implementation wrote only the status of each request.
>>This patch implements a more accurate block device simulator,
>>providing a ramdisk-like behavior.
>>
>>Also handle VIRTIO_BLK_T_GET_ID request, always answering the
>>"vdpa_blk_sim" string.
>
>
>Let's use a separate patch for this.
>

Okay, I'll do that.

>
>>
>>Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
>>---
>> drivers/vdpa/vdpa_sim/vdpa_sim_blk.c | 151 +++++++++++++++++++++++----
>> 1 file changed, 133 insertions(+), 18 deletions(-)
>>
>>diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
>>index 8e41b3ab98d5..68e74383322f 100644
>>--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
>>+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
>>@@ -7,6 +7,7 @@
>> */
>> #include <linux/module.h>
>>+#include <linux/blkdev.h>
>> #include <uapi/linux/virtio_blk.h>
>> #include "vdpa_sim.h"
>>@@ -24,10 +25,137 @@
>> static struct vdpasim *vdpasim_blk_dev;
>>+static int vdpasim_blk_handle_req(struct vdpasim *vdpasim,
>>+ struct vdpasim_virtqueue *vq)
>>+{
>>+ size_t wrote = 0, to_read = 0, to_write = 0;
>>+ struct virtio_blk_outhdr hdr;
>>+ uint8_t status;
>>+ uint32_t type;
>>+ ssize_t bytes;
>>+ loff_t offset;
>>+ int i, ret;
>>+
>>+ vringh_kiov_cleanup(&vq->riov);
>>+ vringh_kiov_cleanup(&vq->wiov);
>
>
>It looks to me we should do those after vringh_get_desc_iotlb()? See
>comment above vringh_getdesc_kern().

Do you mean after the last vringh_iov_push_iotlb()?

Because vringh_kiov_cleanup() will free the allocated iov[].

>
>
>>+
>>+ ret = vringh_getdesc_iotlb(&vq->vring, &vq->riov, &vq->wiov,
>>+ &vq->head, GFP_ATOMIC);
>>+ if (ret != 1)
>>+ return ret;
>>+
>>+ for (i = 0; i < vq->wiov.used; i++)
>>+ to_write += vq->wiov.iov[i].iov_len;
>
>
>It's better to introduce a helper for this (or consider to use iov
>iterator).

Okay, I'll try to find the best solution.
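
For example, something along these lines could replace the open-coded
loops (the helper name and exact form are just an illustration, not a
final implementation):

	/* sum the lengths of all the segments in a vringh_kiov */
	static size_t vdpasim_kiov_length(const struct vringh_kiov *kiov)
	{
		size_t len = 0;
		int i;

		for (i = 0; i < kiov->used; i++)
			len += kiov->iov[i].iov_len;

		return len;
	}

	to_write = vdpasim_kiov_length(&vq->wiov) - 1; /* last byte is the status */
	to_read  = vdpasim_kiov_length(&vq->riov);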

>
>
>>+ to_write -= 1; /* last byte is the status */
>>+
>>+ for (i = 0; i < vq->riov.used; i++)
>>+ to_read += vq->riov.iov[i].iov_len;
>>+
>>+ bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->riov, &hdr, sizeof(hdr));
>>+ if (bytes != sizeof(hdr))
>>+ return 0;
>>+
>>+ to_read -= bytes;
>>+
>>+ type = le32_to_cpu(hdr.type);
>>+ offset = le64_to_cpu(hdr.sector) << SECTOR_SHIFT;
>>+ status = VIRTIO_BLK_S_OK;
>>+
>>+ switch (type) {
>>+ case VIRTIO_BLK_T_IN:
>>+ if (offset + to_write > VDPASIM_BLK_CAPACITY << SECTOR_SHIFT) {
>>+ dev_err(&vdpasim->vdpa.dev,
>>+ "reading over the capacity - offset: 0x%llx len: 0x%lx\n",
>>+ offset, to_write);
>>+ status = VIRTIO_BLK_S_IOERR;
>>+ break;
>>+ }
>>+
>>+ bytes = vringh_iov_push_iotlb(&vq->vring, &vq->wiov,
>>+ vdpasim->buffer + offset,
>>+ to_write);
>>+ if (bytes < 0) {
>>+ dev_err(&vdpasim->vdpa.dev,
>>+ "vringh_iov_push_iotlb() error: %ld offset: 0x%llx len: 0x%lx\n",
>>+ bytes, offset, to_write);
>>+ status = VIRTIO_BLK_S_IOERR;
>>+ break;
>>+ }
>>+
>>+ wrote += bytes;
>>+ break;
>>+
>>+ case VIRTIO_BLK_T_OUT:
>>+ if (offset + to_read > VDPASIM_BLK_CAPACITY << SECTOR_SHIFT) {
>>+ dev_err(&vdpasim->vdpa.dev,
>>+ "writing over the capacity - offset: 0x%llx len: 0x%lx\n",
>>+ offset, to_read);
>>+ status = VIRTIO_BLK_S_IOERR;
>>+ break;
>>+ }
>>+
>>+ bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->riov,
>>+ vdpasim->buffer + offset,
>>+ to_read);
>>+ if (bytes < 0) {
>>+ dev_err(&vdpasim->vdpa.dev,
>>+ "vringh_iov_pull_iotlb() error: %ld offset: 0x%llx len: 0x%lx\n",
>>+ bytes, offset, to_read);
>>+ status = VIRTIO_BLK_S_IOERR;
>>+ break;
>>+ }
>>+ break;
>>+
>>+ case VIRTIO_BLK_T_GET_ID: {
>>+ char id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";
>
>
>Let's use a global static one?

I'll do that.
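
Something like this, as a rough sketch (the symbol name is made up):

	static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";
	...
	case VIRTIO_BLK_T_GET_ID:
		bytes = vringh_iov_push_iotlb(&vq->vring, &vq->wiov,
					      vdpasim_blk_id,
					      VIRTIO_BLK_ID_BYTES);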

>
>
>>+
>>+ bytes = vringh_iov_push_iotlb(&vq->vring,
>>+ &vq->wiov, id,
>>+ VIRTIO_BLK_ID_BYTES);
>>+ if (bytes < 0) {
>>+ dev_err(&vdpasim->vdpa.dev,
>>+ "vringh_iov_push_iotlb() error: %ld\n", bytes);
>>+ status = VIRTIO_BLK_S_IOERR;
>>+ break;
>>+ }
>>+
>>+ wrote += bytes;
>>+ break;
>>+ }
>>+
>>+ default:
>>+ dev_warn(&vdpasim->vdpa.dev,
>>+ "Unsupported request type %d\n", type);
>>+ status = VIRTIO_BLK_S_IOERR;
>>+ break;
>>+ }
>>+
>>+ /* if VIRTIO_BLK_T_IN or VIRTIO_BLK_T_GET_ID fail, we need to skip
>>+ * the remaining bytes to put the status in the last byte
>>+ */
>>+ if (to_write - wrote > 0) {
>>+ vringh_iov_push_iotlb(&vq->vring, &vq->wiov, NULL,
>>+ to_write - wrote);
>>+ }
>>+
>>+ /* last byte is the status */
>>+ bytes = vringh_iov_push_iotlb(&vq->vring, &vq->wiov, &status, 1);
>>+ if (bytes != 1)
>>+ return 0;
>>+
>>+ wrote += bytes;
>>+
>>+ /* Make sure data is wrote before advancing index */
>>+ smp_wmb();
>>+
>>+ vringh_complete_iotlb(&vq->vring, vq->head, wrote);
>>+
>>+ return ret;
>>+}
>>+
>> static void vdpasim_blk_work(struct work_struct *work)
>> {
>> struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
>>- u8 status = VIRTIO_BLK_S_OK;
>> int i;
>> spin_lock(&vdpasim->lock);
>>@@ -41,21 +169,7 @@ static void vdpasim_blk_work(struct work_struct *work)
>> if (!vq->ready)
>> continue;
>>- while (vringh_getdesc_iotlb(&vq->vring, &vq->riov, &vq->wiov,
>>- &vq->head, GFP_ATOMIC) > 0) {
>>-
>>- int write;
>>-
>>- vq->wiov.i = vq->wiov.used - 1;
>>- write = vringh_iov_push_iotlb(&vq->vring, &vq->wiov, &status, 1);
>>- if (write <= 0)
>>- break;
>>-
>>- /* Make sure data is wrote before advancing index */
>>- smp_wmb();
>>-
>>- vringh_complete_iotlb(&vq->vring, vq->head, write);
>>-
>>+ while (vdpasim_blk_handle_req(vdpasim, vq) > 0) {
>> /* Make sure used is visible before rasing the interrupt. */
>> smp_wmb();
>>@@ -67,6 +181,7 @@ static void vdpasim_blk_work(struct work_struct *work)
>> vq->cb(vq->private);
>> local_bh_enable();
>> }
>>+
>
>
>Unnecessary change.

Removed.

Thanks,
Stefano
