    Subject: [PATCH v7 5/5] kasan debug: track pages allocated for vmalloc shadow

    Provide the current number of vmalloc shadow pages in
    /sys/kernel/debug/kasan_vmalloc/shadow_pages.
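
    For illustration only (not part of the change itself): the file is a
    plain debugfs u64, so once debugfs is mounted it can be read with e.g.
    "cat /sys/kernel/debug/kasan_vmalloc/shadow_pages", or from a minimal
    userspace sketch like the hypothetical one below (only the path comes
    from this patch; the rest is an assumed example):

        /* Illustrative sketch only: read the counter exposed by this patch. */
        #include <stdio.h>

        int main(void)
        {
                unsigned long long pages;
                FILE *f = fopen("/sys/kernel/debug/kasan_vmalloc/shadow_pages", "r");

                if (!f) {
                        perror("fopen");
                        return 1;
                }
                if (fscanf(f, "%llu", &pages) != 1) {
                        fprintf(stderr, "unexpected file contents\n");
                        fclose(f);
                        return 1;
                }
                fclose(f);
                printf("vmalloc shadow pages: %llu\n", pages);
                return 0;
        }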

    Signed-off-by: Daniel Axtens <dja@axtens.net>

    ---

    Merging this is probably overkill, but I leave it to the discretion
    of the broader community.

    On v4 (no dynamic freeing), I saw the following approximate figures
    on my test VM:

    - fresh boot: 720
    - after test_vmalloc: ~14000

    With v5 (lazy dynamic freeing):

    - boot: ~490-500
    - running modprobe test_vmalloc pushes the figure up, sometimes as
      high as ~14000, but it drops back down to ~560 after the test ends.
      I'm not sure where the extra sixty pages come from, but running the
      test repeatedly doesn't make the number keep growing, so I don't
      think we're leaking.
    - with vmap_stack, spawning tasks pushes the figure up to ~4200, then
      the lazy freeing kicks in and it drops back to previous levels.
    ---
    mm/kasan/common.c | 26 ++++++++++++++++++++++++++
    1 file changed, 26 insertions(+)

    diff --git a/mm/kasan/common.c b/mm/kasan/common.c
    index e33cbab83309..e40854512417 100644
    --- a/mm/kasan/common.c
    +++ b/mm/kasan/common.c
    @@ -35,6 +35,7 @@
     #include <linux/vmalloc.h>
     #include <linux/bug.h>
     #include <linux/uaccess.h>
    +#include <linux/debugfs.h>
     
     #include <asm/tlbflush.h>
     
    @@ -750,6 +751,8 @@ core_initcall(kasan_memhotplug_init);
     #endif
     
     #ifdef CONFIG_KASAN_VMALLOC
    +static u64 vmalloc_shadow_pages;
    +
     static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                           void *unused)
     {
    @@ -776,6 +779,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
             if (likely(pte_none(*ptep))) {
                     set_pte_at(&init_mm, addr, ptep, pte);
                     page = 0;
    +                vmalloc_shadow_pages++;
             }
             spin_unlock(&init_mm.page_table_lock);
             if (page)
    @@ -829,6 +833,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
             if (likely(!pte_none(*ptep))) {
                     pte_clear(&init_mm, addr, ptep);
                     free_page(page);
    +                vmalloc_shadow_pages--;
             }
             spin_unlock(&init_mm.page_table_lock);
     
    @@ -947,4 +952,25 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
                                            (unsigned long)shadow_end);
             }
     }
    +
    +static __init int kasan_init_vmalloc_debugfs(void)
    +{
    +        struct dentry *root, *count;
    +
    +        root = debugfs_create_dir("kasan_vmalloc", NULL);
    +        if (IS_ERR(root)) {
    +                if (PTR_ERR(root) == -ENODEV)
    +                        return 0;
    +                return PTR_ERR(root);
    +        }
    +
    +        count = debugfs_create_u64("shadow_pages", 0444, root,
    +                                   &vmalloc_shadow_pages);
    +
    +        if (IS_ERR(count))
    +                return PTR_ERR(count);
    +
    +        return 0;
    +}
    +late_initcall(kasan_init_vmalloc_debugfs);
     #endif
    --
    2.20.1