Subject: [PATCH RESEND 2/3] blk-mq: record how many hctx failed to get driver tag while sharing a tag set
The hctx is recorded when it fails to get a driver tag, and the record is
cleared when the hctx becomes idle. Clearing only at idle time is later than
necessary and could be optimized; however, let's take the easy approach for
now, since clearing the record promptly would be rather complicated.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
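For readers without patch 1/3 of this series at hand, here is a rough,
hypothetical sketch of the record-on-failure / clear-on-idle pattern the
commit message describes. The BLK_MQ_S_DTAG_WAIT state bit and the
dtag_wait_queues counter are illustrative names only, and the snippet assumes
the blk-mq internal headers (block/blk-mq-tag.h); the real helpers
blk_mq_dtag_wait()/blk_mq_dtag_idle() are introduced in patch 1/3 and may
differ.

/* Sketch only: BLK_MQ_S_DTAG_WAIT and dtag_wait_queues are made-up names. */
static inline void blk_mq_dtag_wait_sketch(struct blk_mq_hw_ctx *hctx)
{
	/* record this hctx at most once until it goes idle again */
	if (!test_bit(BLK_MQ_S_DTAG_WAIT, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_DTAG_WAIT, &hctx->state))
		atomic_inc(&hctx->tags->dtag_wait_queues);
}

static inline void blk_mq_dtag_idle_sketch(struct blk_mq_hw_ctx *hctx)
{
	/* drop the record once the hctx has gone idle */
	if (test_and_clear_bit(BLK_MQ_S_DTAG_WAIT, &hctx->state))
		atomic_dec(&hctx->tags->dtag_wait_queues);
}

This roughly mirrors how __blk_mq_tag_busy()/__blk_mq_tag_idle() maintain
tags->active_queues, so the blk_mq_dtag_wait()/blk_mq_dtag_idle() calls in
the hunks below can be read against a familiar pattern.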
 block/blk-mq-tag.c |  8 +++++++-
 block/blk-mq-tag.h |  6 +++---
 block/blk-mq.c     | 13 ++++++++++---
 3 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 77c723bdfd5c..d4d212c6c32e 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -163,8 +163,11 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 	if (tag != BLK_MQ_NO_TAG)
 		goto found_tag;
 
-	if (data->flags & BLK_MQ_REQ_NOWAIT)
+	if (data->flags & BLK_MQ_REQ_NOWAIT) {
+		if (!data->q->elevator)
+			blk_mq_dtag_wait(data->hctx);
 		return BLK_MQ_NO_TAG;
+	}
 
 	ws = bt_wait_ptr(bt, data->hctx);
 	do {
@@ -191,6 +194,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		if (tag != BLK_MQ_NO_TAG)
 			break;
 
+		if (!data->q->elevator)
+			blk_mq_dtag_wait(data->hctx);
+
 		bt_prev = bt;
 		io_schedule();
 
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 3fe013aee9a2..d5f98a3e6f91 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -47,9 +47,9 @@ enum {
 	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
 };
 
-extern bool __blk_mq_tag_wait(struct blk_mq_hw_ctx *hctx);
+extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx);
 extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx);
-extern void __blk_mq_dtag_busy(struct blk_mq_hw_ctx *hctx);
+extern void __blk_mq_dtag_wait(struct blk_mq_hw_ctx *hctx);
 extern void __blk_mq_dtag_idle(struct blk_mq_hw_ctx *hctx);
 
 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
@@ -57,7 +57,7 @@ static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
 		return false;
 
-	return __blk_mq_tag_wait(hctx);
+	return __blk_mq_tag_busy(hctx);
 }
 
 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d73bc219a7fa..8d90e686ee8b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1471,8 +1471,10 @@ static void blk_mq_timeout_work(struct work_struct *work)
 		 */
 		queue_for_each_hw_ctx(q, hctx, i) {
 			/* the hctx may be unmapped, so check it here */
-			if (blk_mq_hw_queue_mapped(hctx))
+			if (blk_mq_hw_queue_mapped(hctx)) {
 				blk_mq_tag_idle(hctx);
+				blk_mq_dtag_idle(hctx);
+			}
 		}
 	}
 	blk_queue_exit(q);
@@ -1569,8 +1571,10 @@ static bool __blk_mq_alloc_driver_tag(struct request *rq)
 	}
 
 	tag = __sbitmap_queue_get(bt);
-	if (tag == BLK_MQ_NO_TAG)
+	if (tag == BLK_MQ_NO_TAG) {
+		blk_mq_dtag_wait(rq->mq_hctx);
 		return false;
+	}
 
 	rq->tag = tag + tag_offset;
 	return true;
@@ -3416,8 +3420,10 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 {
 	struct request *flush_rq = hctx->fq->flush_rq;
 
-	if (blk_mq_hw_queue_mapped(hctx))
+	if (blk_mq_hw_queue_mapped(hctx)) {
 		blk_mq_tag_idle(hctx);
+		blk_mq_dtag_idle(hctx);
+	}
 
 	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
 			set->queue_depth, flush_rq);
@@ -3743,6 +3749,7 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 			hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
 		} else {
 			blk_mq_tag_idle(hctx);
+			blk_mq_dtag_idle(hctx);
 			hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
 		}
 	}
--
2.31.1