From: Yu Kuai <yukuai3@huawei.com>
Subject: [PATCH -next RFC v2 5/8] sbitmap: force tag preemption if free tags are sufficient
Date: Fri, 8 Apr 2022
Now that tag preemption is disabled, if the wakers do not use up
'wake_batch' tags while preemption is still disabled, I/O concurrency
will decline.

To fix the problem, add a check before waking up waiters, and force
tag preemption if free tags are sufficient, so that the extra tags can
be used by new I/O.
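
For reference, the "sufficient" threshold used below is two full wake
batches worth of free tags. A standalone sketch of that check (the
function and variable names here are illustrative only, not the
sbitmap internals):

	#include <stdbool.h>

	/*
	 * Sketch of the "free tags are sufficient" test: force tag
	 * preemption once at least two full wake batches worth of
	 * tags are free, so tags beyond what the pending wakers will
	 * consume can still be grabbed by new I/O.
	 */
	static bool should_force_preemption(unsigned int depth,
					    unsigned int busy_tags,
					    unsigned int wake_batch)
	{
		unsigned int free_tags = depth - busy_tags;

		return free_tags >= wake_batch << 1;
	}

With a queue depth of 64 and a wake_batch of 8, preemption is forced
once 16 or more tags are free; below that, the batched wakeup path is
trusted to hand tags to the existing waiters.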

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/blk-mq-tag.c      |  3 ++-
 include/linux/sbitmap.h |  2 ++
 lib/sbitmap.c           | 11 +++++++++++
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index be2d49e6d69e..dfbb06edfbc3 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -131,7 +131,8 @@ static inline bool preempt_tag(struct blk_mq_alloc_data *data,
 			       struct sbitmap_queue *bt)
 {
 	return data->preemption ||
-	       atomic_read(&bt->ws_active) <= SBQ_WAIT_QUEUES;
+	       atomic_read(&bt->ws_active) <= SBQ_WAIT_QUEUES ||
+	       bt->force_tag_preemption;
 }
 
 unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 8a64271d0696..ca00ccb6af48 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -143,6 +143,8 @@ struct sbitmap_queue {
 	 * sbitmap_queue_get_shallow()
 	 */
 	unsigned int min_shallow_depth;
+
+	bool force_tag_preemption;
 };
 
 /**
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 176fba0252d7..8d01e02ea4b1 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -434,6 +434,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
 	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
 	atomic_set(&sbq->wake_index, 0);
 	atomic_set(&sbq->ws_active, 0);
+	sbq->force_tag_preemption = true;
 
 	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
 	if (!sbq->ws) {
@@ -604,6 +605,15 @@ static void sbq_update_wake_index(struct sbitmap_queue *sbq,
 	atomic_cmpxchg(&sbq->wake_index, old_wake_index, index);
 }
 
+static inline void sbq_update_preemption(struct sbitmap_queue *sbq,
+					 unsigned int wake_batch)
+{
+	bool force = (sbq->sb.depth - sbitmap_weight(&sbq->sb)) >=
+		     wake_batch << 1;
+
+	WRITE_ONCE(sbq->force_tag_preemption, force);
+}
+
 static bool __sbq_wake_up(struct sbitmap_queue *sbq)
 {
 	struct sbq_wait_state *ws;
@@ -637,6 +647,7 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
 	 */
 	smp_mb__before_atomic();
 	atomic_set(&ws->wait_cnt, wake_batch);
+	sbq_update_preemption(sbq, wake_batch);
 	wake_up_nr(&ws->wait, wake_batch);
 
 	return true;
--
2.31.1