Subject: [PATCH v4 05/13] mm: page_alloc: add kasan hooks on alloc and free paths
Add kernel address sanitizer hooks to mark the addresses of allocated
pages as accessible in the corresponding shadow region, and to mark
freed pages as inaccessible.
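
For reference, a minimal sketch of the shadow bookkeeping these hooks
rely on (simplified illustration, not the exact kernel code; the real
definitions live in mm/kasan/kasan.h and mm/kasan/kasan.c, and
KASAN_SHADOW_OFFSET is provided by the architecture/Kconfig):

	/*
	 * Every 2^KASAN_SHADOW_SCALE_SHIFT (8) bytes of kernel memory
	 * map to one shadow byte at a fixed offset.
	 */
	static void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}

	/* Fill the shadow of [address, address + size) with 'value'. */
	static void kasan_poison_shadow(const void *address, size_t size,
					u8 value)
	{
		void *shadow_start = kasan_mem_to_shadow(address);
		void *shadow_end = kasan_mem_to_shadow(address + size);

		memset(shadow_start, value, shadow_end - shadow_start);
	}

	/*
	 * kasan_unpoison_shadow() is essentially
	 * kasan_poison_shadow(address, size, 0).
	 */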

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
---
 include/linux/kasan.h |  6 ++++++
 mm/compaction.c       |  2 ++
 mm/kasan/kasan.c      | 14 ++++++++++++++
 mm/kasan/kasan.h      |  1 +
 mm/kasan/report.c     |  7 +++++++
 mm/page_alloc.c       |  3 +++
 6 files changed, 33 insertions(+)
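
With these hooks in place, a page-level use-after-free becomes
detectable. A hypothetical test along these lines (not part of this
patch; the function name is made up) would trigger the new
"use after free" report:

	static noinline void kasan_page_uaf_test(void)
	{
		struct page *page = alloc_pages(GFP_KERNEL, 0);
		char *ptr;

		if (!page)
			return;
		ptr = page_address(page);

		__free_pages(page, 0);
		/*
		 * free_pages_prepare() has poisoned this page's shadow
		 * with KASAN_FREE_PAGE, so the write below is reported.
		 */
		*ptr = 'x';
	}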

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 01c99fe..9714fba 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -30,6 +30,9 @@ static inline void kasan_disable_local(void)
 
 void kasan_unpoison_shadow(const void *address, size_t size);
 
+void kasan_alloc_pages(struct page *page, unsigned int order);
+void kasan_free_pages(struct page *page, unsigned int order);
+
 #else /* CONFIG_KASAN */
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -37,6 +40,9 @@ static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 static inline void kasan_enable_local(void) {}
 static inline void kasan_disable_local(void) {}
 
+static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+
 #endif /* CONFIG_KASAN */
 
 #endif /* LINUX_KASAN_H */
diff --git a/mm/compaction.c b/mm/compaction.c
index edba18a..834f846 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,6 +16,7 @@
 #include <linux/sysfs.h>
 #include <linux/balloon_compaction.h>
 #include <linux/page-isolation.h>
+#include <linux/kasan.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -59,6 +60,7 @@ static void map_pages(struct list_head *list)
 	list_for_each_entry(page, list, lru) {
 		arch_alloc_page(page, 0);
 		kernel_map_pages(page, 1, 1);
+		kasan_alloc_pages(page, 0);
 	}
 }
 
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 8ce738e..5782082 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -259,6 +259,20 @@ static __always_inline void check_memory_region(unsigned long addr,
 	kasan_report_error(&info);
 }
 
+void kasan_alloc_pages(struct page *page, unsigned int order)
+{
+	if (likely(!PageHighMem(page)))
+		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+	if (likely(!PageHighMem(page)))
+		kasan_poison_shadow(page_address(page),
+				PAGE_SIZE << order,
+				KASAN_FREE_PAGE);
+}
+
 void __asan_load1(unsigned long addr)
 {
 	check_memory_region(addr, 1, false);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 9a9fe9f..ee572c4 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -6,6 +6,7 @@
 #define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
 #define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
 
+#define KASAN_FREE_PAGE         0xFF  /* page was freed */
 #define KASAN_SHADOW_GAP        0xF9  /* address belongs to shadow memory */
 
 struct access_info {
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 89a9aa1..707323b 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -57,6 +57,9 @@ static void print_error_description(struct access_info *info)
 	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
 		bug_type = "out of bounds access";
 		break;
+	case KASAN_FREE_PAGE:
+		bug_type = "use after free";
+		break;
 	case KASAN_SHADOW_GAP:
 		bug_type = "wild memory access";
 		break;
@@ -75,6 +78,10 @@ static void print_address_description(struct access_info *info)
 	page = virt_to_head_page((void *)info->access_addr);
 
 	switch (shadow_val) {
+	case KASAN_FREE_PAGE:
+		dump_page(page, "kasan error");
+		dump_stack();
+		break;
 	case KASAN_SHADOW_GAP:
 		pr_err("No metainfo is available for this access.\n");
 		dump_stack();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 53e10ff..88b5032 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -59,6 +59,7 @@
 #include <linux/page-debug-flags.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
+#include <linux/kasan.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -755,6 +756,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 
 	trace_mm_page_free(page, order);
 	kmemcheck_free_shadow(page, order);
+	kasan_free_pages(page, order);
 
 	if (PageAnon(page))
 		page->mapping = NULL;
@@ -941,6 +943,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
 
 	arch_alloc_page(page, order);
 	kernel_map_pages(page, 1 << order, 1);
+	kasan_alloc_pages(page, order);
 
 	if (gfp_flags & __GFP_ZERO)
 		prep_zero_page(page, order, gfp_flags);
--
2.1.2

