Subject: [PATCH v2] scsi: support packing multi-segment in UNMAP command
The SCSI specification allows a single UNMAP command to carry one or more
LBA ranges. However, we previously packed only one LBA range per UNMAP
command, even when the device's block limits indicate that it can handle
multiple block descriptors in one command.

This patch sets the max_discard_segments queue limit according to the
device's block limits and packs multiple LBA ranges into a single UNMAP
command; see the sketch of the resulting parameter list after the diffstat
below.

Signed-off-by: Chao Yu <chao@kernel.org>
---
v2:
- rebase the code.
drivers/scsi/sd.c | 30 +++++++++++++++++++-----------
drivers/scsi/sd.h | 1 +
2 files changed, 20 insertions(+), 11 deletions(-)
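
For reference, a minimal userspace sketch of the UNMAP parameter list that
sd_setup_unmap_cmnd() builds with this change: an 8-byte header followed by
one 16-byte block descriptor per discard segment (layout per SBC). The
struct and helper names below (lba_range, pack_unmap, put_be*) are
illustrative only and are not part of this patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct lba_range {			/* one discard segment */
	uint64_t lba;
	uint32_t nr_blocks;
};

static void put_be16(uint16_t v, uint8_t *p)
{
	p[0] = v >> 8;
	p[1] = v;
}

static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v;
}

static void put_be64(uint64_t v, uint8_t *p)
{
	put_be32(v >> 32, p);
	put_be32(v, p + 4);
}

/* Build an UNMAP parameter list: 8-byte header plus 16 bytes per segment. */
static size_t pack_unmap(uint8_t *buf, const struct lba_range *r,
			 unsigned int segments)
{
	size_t data_len = 8 + 16 * segments;
	unsigned int i;

	memset(buf, 0, data_len);
	put_be16(6 + 16 * segments, &buf[0]);	/* UNMAP data length */
	put_be16(16 * segments, &buf[2]);	/* block descriptor data length */

	for (i = 0; i < segments; i++) {
		/* each descriptor: 8-byte starting LBA, 4-byte block count */
		put_be64(r[i].lba, &buf[8 + 16 * i]);
		put_be32(r[i].nr_blocks, &buf[8 + 16 * i + 8]);
	}
	return data_len;	/* value for the CDB parameter list length */
}

int main(void)
{
	struct lba_range ranges[] = { { 2048, 256 }, { 8192, 1024 } };
	uint8_t buf[8 + 16 * 2];
	size_t len = pack_unmap(buf, ranges, 2);

	printf("UNMAP parameter list: %zu bytes, %d descriptors\n", len, 2);
	return 0;
}
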

diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 895b56c8f25e..143b4eecf657 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -790,6 +790,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
q->limits.discard_granularity =
max(sdkp->physical_block_size,
sdkp->unmap_granularity * logical_block_size);
+ blk_queue_max_discard_segments(q, sdkp->max_block_desc_count);
sdkp->provisioning_mode = mode;

switch (mode) {
@@ -835,10 +836,10 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
struct request *rq = scsi_cmd_to_rq(cmd);
+ struct bio *bio;
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
- u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
- u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
- unsigned int data_len = 24;
+ unsigned short segments = blk_rq_nr_discard_segments(rq);
+ unsigned int data_len = 8 + 16 * segments, i = 0;
char *buf;

rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
@@ -851,13 +852,20 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)

cmd->cmd_len = 10;
cmd->cmnd[0] = UNMAP;
- cmd->cmnd[8] = 24;
+ cmd->cmnd[8] = data_len;

buf = bvec_virt(&rq->special_vec);
- put_unaligned_be16(6 + 16, &buf[0]);
- put_unaligned_be16(16, &buf[2]);
- put_unaligned_be64(lba, &buf[8]);
- put_unaligned_be32(nr_blocks, &buf[16]);
+ put_unaligned_be16(6 + 16 * segments, &buf[0]);
+ put_unaligned_be16(16 * segments, &buf[2]);
+
+ __rq_for_each_bio(bio, rq) {
+ u64 lba = sectors_to_logical(sdp, bio->bi_iter.bi_sector);
+ u32 nr_blocks = sectors_to_logical(sdp, bio_sectors(bio));
+
+ put_unaligned_be64(lba, &buf[8 + 16 * i]);
+ put_unaligned_be32(nr_blocks, &buf[8 + 16 * i + 8]);
+ i++;
+ }

cmd->allowed = sdkp->max_retries;
cmd->transfersize = data_len;
@@ -2862,7 +2870,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);

if (vpd->len >= 64) {
- unsigned int lba_count, desc_count;
+ unsigned int lba_count;

sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);

@@ -2870,9 +2878,9 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
goto out;

lba_count = get_unaligned_be32(&vpd->data[20]);
- desc_count = get_unaligned_be32(&vpd->data[24]);
+ sdkp->max_block_desc_count = get_unaligned_be32(&vpd->data[24]);

- if (lba_count && desc_count)
+ if (lba_count && sdkp->max_block_desc_count)
sdkp->max_unmap_blocks = lba_count;

sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5eea762f84d1..bda9db5e2322 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -119,6 +119,7 @@ struct scsi_disk {
u32 opt_xfer_blocks;
u32 max_ws_blocks;
u32 max_unmap_blocks;
+ u32 max_block_desc_count;
u32 unmap_granularity;
u32 unmap_alignment;
u32 index;
--
2.25.1