Subject: [PATCH 2/2] mm: fs: Invalidate BH LRU during page migration
Pages containing buffer_heads that are in one of the per-CPU
buffer_head LRU caches will be pinned and thus cannot be migrated.
This can prevent CMA allocations from succeeding; such allocations are
often used on platforms with co-processors (such as a DSP) that can
only use physically contiguous memory. It can also prevent memory
hot-unplugging from succeeding, which requires migrating at least
MIN_MEMORY_BLOCK_SIZE bytes of memory (from 8 MiB to 1 GiB, depending
on the architecture).

To address this, invalidate the BH LRU caches before a migration
starts and keep any buffer_head from being cached in the LRU caches
until the migration has finished.

Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Chris Goldsworthy <cgoldswo@codeaurora.org>
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
* from prev - https://lore.kernel.org/linux-mm/e8f3e042b902156467a5e978b57c14954213ec59.1611642039.git.cgoldswo@codeaurora.org/
* consolidate bh_lru drain logic into lru pagevec - willy
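
To make the pinning problem concrete, here is a small stand-alone C
model (not kernel code; names and sizes are only loosely borrowed from
fs/buffer.c) showing how a buffer_head cached in a per-CPU LRU keeps a
page reference, and how invalidating the LRU while installs are
disabled makes the page migratable again:

#include <stdbool.h>
#include <stdio.h>

#define BH_LRU_SIZE 16

/* Toy "page": migratable only while nothing else holds a reference. */
struct page { int refcount; };

/* Toy buffer_head: pins its page while it sits in the per-CPU LRU. */
struct buffer_head { struct page *page; };

static struct buffer_head *bh_lru[BH_LRU_SIZE];
static bool lru_cache_disabled_flag;

/* Models the check this patch adds to bh_lru_install(). */
static void bh_lru_install(struct buffer_head *bh)
{
	if (lru_cache_disabled_flag)
		return;			/* don't pin pages during migration */
	if (!bh_lru[0]) {
		bh_lru[0] = bh;
		bh->page->refcount++;	/* cached bh holds a page reference */
	}
}

/* Models invalidate_bh_lru(): drop every cached reference. */
static void invalidate_bh_lru(void)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (bh_lru[i]) {
			bh_lru[i]->page->refcount--;
			bh_lru[i] = NULL;
		}
	}
}

static bool can_migrate(struct page *page)
{
	return page->refcount == 0;
}

int main(void)
{
	struct page page = { .refcount = 0 };
	struct buffer_head bh = { .page = &page };

	bh_lru_install(&bh);
	printf("before drain: migratable=%d\n", can_migrate(&page)); /* 0 */

	/* What lru_cache_disable() + lru_add_drain_all() achieve: */
	lru_cache_disabled_flag = true;
	invalidate_bh_lru();
	bh_lru_install(&bh);			/* skipped while disabled */
	printf("after drain:  migratable=%d\n", can_migrate(&page)); /* 1 */

	return 0;
}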

 fs/buffer.c                 | 12 ++++++++++--
 include/linux/buffer_head.h |  2 ++
 include/linux/swap.h        |  1 +
 mm/swap.c                   |  5 ++++-
 4 files changed, 17 insertions(+), 3 deletions(-)
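
For context, the caller pattern this is meant to serve looks roughly
like the sketch below. It assumes the lru_cache_disable()/
lru_cache_enable() pair introduced in patch 1/2; the cma_style_alloc()
wrapper and the specific alloc_contig_range() call are illustrative
only and not part of this series.

/*
 * Sketch: the disable/enable bracket comes from patch 1/2; this patch
 * makes the same bracket also cover the per-CPU BH LRUs.
 */
static int cma_style_alloc(unsigned long start, unsigned long end)
{
	int ret;

	/*
	 * Disables per-CPU LRU caching and drains the per-CPU pagevecs
	 * via lru_add_drain_all(); with this patch, lru_add_drain_cpu()
	 * also calls invalidate_bh_lru(), and bh_lru_install() refuses
	 * to cache new buffer_heads while lru_cache_disabled() is true.
	 */
	lru_cache_disable();

	ret = alloc_contig_range(start, end, MIGRATE_CMA, GFP_KERNEL);

	lru_cache_enable();
	return ret;
}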

diff --git a/fs/buffer.c b/fs/buffer.c
index 96c7604f69b3..4492e9d4c9d3 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1301,6 +1301,14 @@ static void bh_lru_install(struct buffer_head *bh)
int i;

check_irqs_on();
+ /*
+ * A buffer_head in bh_lru could increase the refcount of its page
+ * until it is invalidated, which causes page migration failure.
+ * Skip putting upcoming bh into bh_lru until migration is done.
+ */
+ if (lru_cache_disabled())
+ return;
+
bh_lru_lock();

b = this_cpu_ptr(&bh_lrus);
@@ -1446,7 +1454,7 @@ EXPORT_SYMBOL(__bread_gfp);
* This doesn't race because it runs in each cpu either in irq
* or with preempt disabled.
*/
-static void invalidate_bh_lru(void *arg)
+void invalidate_bh_lru(void *arg)
{
struct bh_lru *b = &get_cpu_var(bh_lrus);
int i;
@@ -1458,7 +1466,7 @@ static void invalidate_bh_lru(void *arg)
put_cpu_var(bh_lrus);
}

-static bool has_bh_in_lru(int cpu, void *dummy)
+bool has_bh_in_lru(int cpu, void *dummy)
{
struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
int i;
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 6b47f94378c5..3d98bdabaac9 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -194,6 +194,8 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
+void invalidate_bh_lru(void *);
+bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8ab7ad7157f3..94a77e618dba 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -341,6 +341,7 @@ extern void lru_cache_add(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_cache_disable(void);
extern void lru_cache_enable(void);
+extern bool lru_cache_disabled(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
diff --git a/mm/swap.c b/mm/swap.c
index c1fa6cac04c1..88d51b9ebc8c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -36,6 +36,7 @@
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
+#include <linux/buffer_head.h>

#include "internal.h"

@@ -667,6 +668,7 @@ void lru_add_drain_cpu(int cpu)
pagevec_lru_move_fn(pvec, lru_lazyfree_fn);

activate_page_drain(cpu);
+ invalidate_bh_lru(NULL);
}

/**
@@ -854,7 +856,8 @@ void lru_add_drain_all(bool force_all_cpus)
pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
- need_activate_page_drain(cpu)) {
+ need_activate_page_drain(cpu) ||
+ has_bh_in_lru(cpu, NULL)) {
INIT_WORK(work, lru_add_drain_per_cpu);
queue_work_on(cpu, mm_percpu_wq, work);
__cpumask_set_cpu(cpu, &has_work);
--
2.30.1.766.gb4fecdf3b7-goog