Subject: Re: [PATCH v10 7/9] mm: Introduce mf_dax_kill_procs() for fsdax case
On Thu, Jan 27, 2022 at 4:41 AM Shiyang Ruan <ruansy.fnst@fujitsu.com> wrote:
>
> This function is called at the end of the RMAP routine, i.e. the
> filesystem recovery function, to collect and kill processes that are
> using a shared page of a DAX file. It differs from
> mf_generic_kill_procs() in that it takes the file's (mapping, offset)
> pair instead of a struct page, because different files' mappings and
> offsets may share the same page in fsdax mode. It is called once the
> filesystem's RMAP results are available.
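
For reference, a minimal sketch of how a filesystem recovery path might
invoke this helper once its reverse mapping has resolved a failed range
to a file offset. The function and variable names below are
hypothetical, not taken from this series:

	/* Hypothetical caller: the fs rmap walk produced (mapping, pgoff, nr_pages). */
	static int fs_dax_failure_range(struct address_space *mapping,
					pgoff_t pgoff, unsigned long nr_pages)
	{
		/* MF_ACTION_REQUIRED | MF_MUST_KILL are OR'd in internally. */
		return mf_dax_kill_procs(mapping, pgoff, nr_pages, 0);
	}
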
>
> Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
> ---
> include/linux/mm.h | 4 ++
> mm/memory-failure.c | 91 +++++++++++++++++++++++++++++++++++++++------
> 2 files changed, 84 insertions(+), 11 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 9b1d56c5c224..0420189e4788 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -3195,6 +3195,10 @@ enum mf_flags {
> MF_SOFT_OFFLINE = 1 << 3,
> MF_UNPOISON = 1 << 4,
> };
> +#if IS_ENABLED(CONFIG_FS_DAX)
> +int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
> + unsigned long count, int mf_flags);
> +#endif /* CONFIG_FS_DAX */
> extern int memory_failure(unsigned long pfn, int flags);
> extern void memory_failure_queue(unsigned long pfn, int flags);
> extern void memory_failure_queue_kick(int cpu);
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index b2d13eba1071..8d123cc4102e 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -304,10 +304,9 @@ void shake_page(struct page *p)
> }
> EXPORT_SYMBOL_GPL(shake_page);
>
> -static unsigned long dev_pagemap_mapping_shift(struct page *page,
> - struct vm_area_struct *vma)
> +static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
> + unsigned long address)
> {
> - unsigned long address = vma_address(page, vma);
> unsigned long ret = 0;
> pgd_t *pgd;
> p4d_t *p4d;
> @@ -347,9 +346,8 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
> * Schedule a process for later kill.
> * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
> */
> -static void add_to_kill(struct task_struct *tsk, struct page *p,
> - struct vm_area_struct *vma,
> - struct list_head *to_kill)
> +static void add_to_kill(struct task_struct *tsk, struct page *p, pgoff_t pgoff,
> + struct vm_area_struct *vma, struct list_head *to_kill)
> {
> struct to_kill *tk;
>
> @@ -360,9 +358,15 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
> }
>
> tk->addr = page_address_in_vma(p, vma);
> - if (is_zone_device_page(p))
> - tk->size_shift = dev_pagemap_mapping_shift(p, vma);
> - else
> + if (is_zone_device_page(p)) {
> + /*
> + * Since page->mapping is not used for fsdax, we need to
> + * calculate the address based on the vma.
> + */
> + if (p->pgmap->type == MEMORY_DEVICE_FS_DAX)
> + tk->addr = vma_pgoff_address(vma, pgoff);
> + tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
> + } else
> tk->size_shift = page_shift(compound_head(p));
>
> /*
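
(For clarity: vma_pgoff_address(), defined elsewhere in this series,
resolves a file pgoff to a user address with the usual linear-mapping
arithmetic, roughly:

	/* Sketch only; assumes pgoff falls inside the linearly-mapped vma. */
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

which is why the fsdax branch above can work from a pgoff instead of
relying on page->mapping.)
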
> @@ -510,7 +514,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
> if (!page_mapped_in_vma(page, vma))
> continue;
> if (vma->vm_mm == t->mm)
> - add_to_kill(t, page, vma, to_kill);
> + add_to_kill(t, page, 0, vma, to_kill);
> }
> }
> read_unlock(&tasklist_lock);
> @@ -546,12 +550,40 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
> * to be informed of all such data corruptions.
> */
> if (vma->vm_mm == t->mm)
> - add_to_kill(t, page, vma, to_kill);
> + add_to_kill(t, page, 0, vma, to_kill);
> + }
> + }
> + read_unlock(&tasklist_lock);
> + i_mmap_unlock_read(mapping);
> +}
> +
> +#if IS_ENABLED(CONFIG_FS_DAX)
> +/*
> + * Collect processes when the error hits an fsdax page.
> + */
> +static void collect_procs_fsdax(struct page *page,
> + struct address_space *mapping, pgoff_t pgoff,
> + struct list_head *to_kill)
> +{
> + struct vm_area_struct *vma;
> + struct task_struct *tsk;
> +
> + i_mmap_lock_read(mapping);
> + read_lock(&tasklist_lock);
> + for_each_process(tsk) {
> + struct task_struct *t = task_early_kill(tsk, true);
> +
> + if (!t)
> + continue;
> + vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
> + if (vma->vm_mm == t->mm)
> + add_to_kill(t, page, pgoff, vma, to_kill);
> }
> }
> read_unlock(&tasklist_lock);
> i_mmap_unlock_read(mapping);
> }
> +#endif /* CONFIG_FS_DAX */
>
> /*
> * Collect the processes who have the corrupted page mapped to kill.
> @@ -1574,6 +1606,43 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
> return 0;
> }
>
> +#if IS_ENABLED(CONFIG_FS_DAX)
> +/**
> + * mf_dax_kill_procs - Collect and kill processes who are using this file range
> + * @mapping: the file in use
> + * @index: start pgoff of the range within the file
> + * @count: length of the range, in units of PAGE_SIZE
> + * @mf_flags: memory failure flags
> + */
> +int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
> + unsigned long count, int mf_flags)
> +{
> + LIST_HEAD(to_kill);
> + int rc;
> + struct page *page;
> + size_t end = index + count;
> +
> + mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
> +
> + for (; index < end; index++) {
> + page = NULL;
> + rc = dax_load_page(mapping, index, &page);
> + if (rc)
> + return rc;
> + if (!page)
> + continue;
> +
> + SetPageHWPoison(page);
> +
> + collect_procs_fsdax(page, mapping, index, &to_kill);
> + unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
> + index, mf_flags);

Depending on the answer to the question in patch 5, a put_page() or
dax_unlock_page() may be needed here.
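
If dax_load_page() does end up taking a reference or holding the dax
entry lock (the open question from patch 5), the tail of the loop would
need a matching release, roughly like the sketch below. The cookie
handling is an assumption, since dax_unlock_page() pairs with the
dax_entry_t returned by the lock side:

	collect_procs_fsdax(page, mapping, index, &to_kill);
	unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
			index, mf_flags);
	/* Hypothetical release, depending on dax_load_page() semantics: */
	dax_unlock_page(page, cookie);	/* if it held the dax entry lock */
	/* or: put_page(page);             if it took a page reference */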
