Subject: [PATCH 01/11] blk-mq: Add blk_mq_init_queue_ops()
Add an API to allocate a request queue that accepts a custom set of
blk_mq_ops for that queue.

The reason we may want custom ops is to queue requests that should not
go through the normal queuing path.
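
For illustration, a hedged sketch of how a driver might use the new API
(the my_* names are hypothetical and not part of this patch):

  /* queue_rq callback that bypasses the normal queuing path */
  static blk_status_t my_reserved_queue_rq(struct blk_mq_hw_ctx *hctx,
  					   const struct blk_mq_queue_data *bd)
  {
  	blk_mq_start_request(bd->rq);
  	my_hw_submit(bd->rq);	/* hypothetical direct hardware submit */
  	return BLK_STS_OK;
  }

  static const struct blk_mq_ops my_reserved_ops = {
  	.queue_rq	= my_reserved_queue_rq,
  };

  /* the normal queue dispatches via the ops embedded in the tag set ... */
  q = blk_mq_init_queue(set);

  /* ... while this queue shares set's tags but uses the custom ops */
  reserved_q = blk_mq_init_queue_ops(set, &my_reserved_ops);
  if (IS_ERR(reserved_q))
  	return PTR_ERR(reserved_q);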

Signed-off-by: John Garry <john.garry@huawei.com>
---
block/blk-mq.c | 23 +++++++++++++++++------
drivers/md/dm-rq.c | 2 +-
include/linux/blk-mq.h | 5 ++++-
3 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index f3bf3358a3bb..8ea3447339ca 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3858,7 +3858,7 @@ void blk_mq_release(struct request_queue *q)
}

static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
- void *queuedata)
+ void *queuedata, const struct blk_mq_ops *ops)
{
struct request_queue *q;
int ret;
@@ -3867,27 +3867,35 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
if (!q)
return ERR_PTR(-ENOMEM);
q->queuedata = queuedata;
- ret = blk_mq_init_allocated_queue(set, q);
+ ret = blk_mq_init_allocated_queue(set, q, ops);
if (ret) {
blk_cleanup_queue(q);
return ERR_PTR(ret);
}
+
return q;
}

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
- return blk_mq_init_queue_data(set, NULL);
+ return blk_mq_init_queue_data(set, NULL, NULL);
}
EXPORT_SYMBOL(blk_mq_init_queue);

+struct request_queue *blk_mq_init_queue_ops(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *custom_ops)
+{
+ return blk_mq_init_queue_data(set, NULL, custom_ops);
+}
+EXPORT_SYMBOL(blk_mq_init_queue_ops);
+
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
struct lock_class_key *lkclass)
{
struct request_queue *q;
struct gendisk *disk;

- q = blk_mq_init_queue_data(set, queuedata);
+ q = blk_mq_init_queue_data(set, queuedata, NULL);
if (IS_ERR(q))
return ERR_CAST(q);

@@ -4010,13 +4018,16 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
}

int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- struct request_queue *q)
+ struct request_queue *q, const struct blk_mq_ops *custom_ops)
{
WARN_ON_ONCE(blk_queue_has_srcu(q) !=
!!(set->flags & BLK_MQ_F_BLOCKING));

/* mark the queue as mq asap */
- q->mq_ops = set->ops;
+ if (custom_ops)
+ q->mq_ops = custom_ops;
+ else
+ q->mq_ops = set->ops;

q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
blk_mq_poll_stats_bkt,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 3907950a0ddc..9d93f72a3eec 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -560,7 +560,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
if (err)
goto out_kfree_tag_set;

- err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
+ err = blk_mq_init_allocated_queue(md->tag_set, md->queue, NULL);
if (err)
goto out_tag_set;
return 0;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d319ffa59354..e12d17c86c52 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -688,8 +688,11 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
__blk_mq_alloc_disk(set, queuedata, &__key); \
})
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_queue_ops(struct blk_mq_tag_set *,
+ const struct blk_mq_ops *custom_ops);
+
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- struct request_queue *q);
+ struct request_queue *q, const struct blk_mq_ops *custom_ops);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
--
2.26.2