Subject: Re: [PATCH 2/4] mm/ksm: fix ksm_zero_pages accounting

On 2024/5/8 17:55, Chengming Zhou wrote:
> We normally do ksm_zero_pages++ in ksmd when a page is merged with the
> zero page, but ksm_zero_pages-- is done from the page table side, which
> can't be protected by the ksmd mutex.
>
> So in rare cases we can read an exceptional value of ksm_zero_pages,
> such as -1, which is very confusing to users.
>
> Fix it by changing it to use atomic_long_t; the same applies to
> mm->ksm_zero_pages.
>

Fixes: e2942062e01d ("ksm: count all zero pages placed by KSM")
Fixes: 6080d19f0704 ("ksm: add ksm zero pages for each process")

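To make the race concrete, here is a minimal userspace sketch (illustrative
only, not kernel code; the thread setup and iteration count are made up for
the demo). One thread increments a plain long under a mutex, standing in for
ksmd, while another decrements it with no locking, standing in for the page
table side. The mutex only serializes incrementers against each other, so
the lockless read-modify-write decrements can race with the increments and
lose updates, letting the counter drift below zero even though the
increments and decrements are balanced:

/* race.c: build with `gcc -O2 -pthread race.c` */
#include <pthread.h>
#include <stdio.h>

#define ITERS 10000000L

static long counter;	/* like the old `unsigned long ksm_zero_pages` */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* "ksmd" side: increments are serialized by a mutex */
static void *inc_thread(void *arg)
{
	for (long i = 0; i < ITERS; i++) {
		pthread_mutex_lock(&lock);
		counter++;
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

/* "page table" side: decrements take no lock at all */
static void *dec_thread(void *arg)
{
	for (long i = 0; i < ITERS; i++)
		counter--;	/* non-atomic RMW racing with counter++ */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, inc_thread, NULL);
	pthread_create(&b, NULL, dec_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* balanced ++/--, so this "should" be 0; it usually isn't */
	printf("counter = %ld\n", counter);
	return 0;
}

Switching `long counter` to C11 `_Atomic long` (the userspace analogue of
the atomic_long_t conversion below) makes every ++ and -- a single atomic
read-modify-write, and the program then reliably prints 0.
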
> Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
> ---
> fs/proc/base.c | 2 +-
> include/linux/ksm.h | 22 +++++++++++++++++++---
> include/linux/mm_types.h | 2 +-
> mm/ksm.c | 11 +++++------
> 4 files changed, 26 insertions(+), 11 deletions(-)
>
> diff --git a/fs/proc/base.c b/fs/proc/base.c
> index 18550c071d71..72a1acd03675 100644
> --- a/fs/proc/base.c
> +++ b/fs/proc/base.c
> @@ -3214,7 +3214,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
> mm = get_task_mm(task);
> if (mm) {
> seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
> - seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
> + seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
> seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
> seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
> mmput(mm);
> diff --git a/include/linux/ksm.h b/include/linux/ksm.h
> index 52c63a9c5a9c..bfc2cf756b0d 100644
> --- a/include/linux/ksm.h
> +++ b/include/linux/ksm.h
> @@ -33,16 +33,32 @@ void __ksm_exit(struct mm_struct *mm);
> */
> #define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
>
> -extern unsigned long ksm_zero_pages;
> +extern atomic_long_t ksm_zero_pages;
> +
> +static inline void ksm_map_zero_page(struct mm_struct *mm)
> +{
> + atomic_long_inc(&ksm_zero_pages);
> + atomic_long_inc(&mm->ksm_zero_pages);
> +}
>
> static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
> {
> if (is_ksm_zero_pte(pte)) {
> - ksm_zero_pages--;
> - mm->ksm_zero_pages--;
> + atomic_long_dec(&ksm_zero_pages);
> + atomic_long_dec(&mm->ksm_zero_pages);
> }
> }
>
> +static inline long get_ksm_zero_pages(void)
> +{
> + return atomic_long_read(&ksm_zero_pages);
> +}
> +
> +static inline long mm_ksm_zero_pages(struct mm_struct *mm)
> +{
> + return atomic_long_read(&mm->ksm_zero_pages);
> +}
> +
> static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
> {
> if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 24323c7d0bd4..af3a0256fa93 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -985,7 +985,7 @@ struct mm_struct {
> * Represent how many empty pages are merged with kernel zero
> * pages when enabling KSM use_zero_pages.
> */
> - unsigned long ksm_zero_pages;
> + atomic_long_t ksm_zero_pages;
> #endif /* CONFIG_KSM */
> #ifdef CONFIG_LRU_GEN_WALKS_MMU
> struct {
> diff --git a/mm/ksm.c b/mm/ksm.c
> index 0f9c491552ff..6e0dca3cecf3 100644
> --- a/mm/ksm.c
> +++ b/mm/ksm.c
> @@ -296,7 +296,7 @@ static bool ksm_use_zero_pages __read_mostly;
> static bool ksm_smart_scan = true;
>
> /* The number of zero pages which is placed by KSM */
> -unsigned long ksm_zero_pages;
> +atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
>
> /* The number of pages that have been skipped due to "smart scanning" */
> static unsigned long ksm_pages_skipped;
> @@ -1429,8 +1429,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
> * the dirty bit in zero page's PTE is set.
> */
> newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
> - ksm_zero_pages++;
> - mm->ksm_zero_pages++;
> + ksm_map_zero_page(mm);
> /*
> * We're replacing an anonymous page with a zero page, which is
> * not anonymous. We need to do proper accounting otherwise we
> @@ -3373,7 +3372,7 @@ static void wait_while_offlining(void)
> #ifdef CONFIG_PROC_FS
> long ksm_process_profit(struct mm_struct *mm)
> {
> - return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
> + return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
> mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
> }
> #endif /* CONFIG_PROC_FS */
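
As a sanity check on the profit formula with concrete numbers (assuming
4 KiB pages and, purely for illustration, a 64-byte struct ksm_rmap_item;
the real size depends on kernel config and architecture): a process with
100 merged pages, 50 KSM-placed zero pages and 200 rmap items has a profit
of (100 + 50) * 4096 - 200 * 64 = 614400 - 12800 = 601600 bytes. The
mm_ksm_zero_pages() helper also means the atomic counter is read exactly
once here, so a concurrent unmap can no longer make the zero-page term of
the sum appear negative.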
> @@ -3662,7 +3661,7 @@ KSM_ATTR_RO(pages_skipped);
> static ssize_t ksm_zero_pages_show(struct kobject *kobj,
> struct kobj_attribute *attr, char *buf)
> {
> - return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
> + return sysfs_emit(buf, "%ld\n", get_ksm_zero_pages());
> }
> KSM_ATTR_RO(ksm_zero_pages);
>
> @@ -3671,7 +3670,7 @@ static ssize_t general_profit_show(struct kobject *kobj,
> {
> long general_profit;
>
> - general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
> + general_profit = (ksm_pages_sharing + get_ksm_zero_pages()) * PAGE_SIZE -
> ksm_rmap_items * sizeof(struct ksm_rmap_item);
>
> return sysfs_emit(buf, "%ld\n", general_profit);
>
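
For testing, both read paths touched above are user-visible: the global
count via /sys/kernel/mm/ksm/ksm_zero_pages and the per-process count from
the ksm_zero_pages line of /proc/<pid>/ksm_stat. With the atomic
conversion, neither should ever report a transient -1 again.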
