Subject: [PATCH mm v4 13/19] kasan, mm: rename kasan_poison_kfree
From: Andrey Konovalov <andreyknvl@google.com>
    Rename kasan_poison_kfree() to kasan_slab_free_mempool() as it better
    reflects what this annotation does. Also add a comment that explains the
    PageSlab() check.

    No functional changes.

    Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
    Reviewed-by: Marco Elver <elver@google.com>
    Link: https://linux-review.googlesource.com/id/I5026f87364e556b506ef1baee725144bb04b8810
    ---
 include/linux/kasan.h | 16 ++++++++--------
 mm/kasan/common.c     | 40 +++++++++++++++++++++++-----------------
 mm/mempool.c          |  2 +-
 3 files changed, 32 insertions(+), 26 deletions(-)
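
Before the diff, a minimal sketch (illustrative only, not part of this patch) of the call path the renamed annotation covers: a kmalloc-backed mempool whose elements are poisoned by kasan_slab_free_mempool() when they are returned to the pool rather than actually freed. The pool name, element size, and function names below are made up for the example.

#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *example_pool;

static int example_init(void)
{
	/* 16 pre-allocated elements of 128 bytes each, backed by kmalloc(). */
	example_pool = mempool_create_kmalloc_pool(16, 128);
	return example_pool ? 0 : -ENOMEM;
}

static void example_use(void)
{
	void *elem = mempool_alloc(example_pool, GFP_KERNEL);

	if (!elem)
		return;
	/*
	 * mempool_free() may keep the element in the pool instead of
	 * kfree()-ing it; add_element() then calls kasan_poison_element(),
	 * which calls kasan_slab_free_mempool(), so accesses through stale
	 * pointers to the element are still detected.
	 */
	mempool_free(elem, example_pool);
}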

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index f631f99aa4b4..2610438120ce 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -175,6 +175,13 @@ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
 	return false;
 }
 
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
+static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+	if (kasan_enabled())
+		__kasan_slab_free_mempool(ptr, ip);
+}
+
 void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
 				       void *object, gfp_t flags);
 static __always_inline void * __must_check kasan_slab_alloc(
@@ -215,13 +222,6 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
 	return (void *)object;
 }
 
-void __kasan_poison_kfree(void *ptr, unsigned long ip);
-static __always_inline void kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-	if (kasan_enabled())
-		__kasan_poison_kfree(ptr, ip);
-}
-
 void __kasan_kfree_large(void *ptr, unsigned long ip);
 static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
 {
@@ -260,6 +260,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
 {
 	return false;
 }
+static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
 				   gfp_t flags)
 {
@@ -279,7 +280,6 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
 	return (void *)object;
 }
-static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
 static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
 
 #endif /* CONFIG_KASAN */
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 17918bd20ed9..1205faac90bd 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -335,6 +335,29 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 	return ____kasan_slab_free(cache, object, ip, true);
 }
 
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+	struct page *page;
+
+	page = virt_to_head_page(ptr);
+
+	/*
+	 * Even though this function is only called for kmem_cache_alloc and
+	 * kmalloc backed mempool allocations, those allocations can still be
+	 * !PageSlab() when the size provided to kmalloc is larger than
+	 * KMALLOC_MAX_CACHE_SIZE, and kmalloc falls back onto page_alloc.
+	 */
+	if (unlikely(!PageSlab(page))) {
+		if (ptr != page_address(page)) {
+			kasan_report_invalid_free(ptr, ip);
+			return;
+		}
+		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
+	} else {
+		____kasan_slab_free(page->slab_cache, ptr, ip, false);
+	}
+}
+
 static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 {
 	kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
@@ -429,23 +452,6 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 					flags, true);
 }
 
-void __kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-	struct page *page;
-
-	page = virt_to_head_page(ptr);
-
-	if (unlikely(!PageSlab(page))) {
-		if (ptr != page_address(page)) {
-			kasan_report_invalid_free(ptr, ip);
-			return;
-		}
-		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
-	} else {
-		____kasan_slab_free(page->slab_cache, ptr, ip, false);
-	}
-}
-
 void __kasan_kfree_large(void *ptr, unsigned long ip)
 {
 	if (ptr != page_address(virt_to_head_page(ptr)))
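
The comment added above explains why a kmem_cache_alloc or kmalloc backed mempool allocation can still be !PageSlab(). As an illustrative sketch (not part of the patch; the pool name and size are made up): when the element size exceeds KMALLOC_MAX_CACHE_SIZE, kmalloc() falls back to the page allocator, so the pointer reaching kasan_slab_free_mempool() takes the poison_range(..., KASAN_FREE_PAGE) branch rather than the slab path.

#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *big_pool;

static int big_pool_init(void)
{
	/*
	 * Element size deliberately above KMALLOC_MAX_CACHE_SIZE: there is
	 * no kmalloc slab cache this large, so kmalloc() falls back to the
	 * page allocator and virt_to_head_page() on an element is !PageSlab().
	 */
	big_pool = mempool_create_kmalloc_pool(4, KMALLOC_MAX_CACHE_SIZE + 1);
	return big_pool ? 0 : -ENOMEM;
}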
diff --git a/mm/mempool.c b/mm/mempool.c
index 583a9865b181..624ed51b060f 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element)
 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_poison_kfree(element, _RET_IP_);
+		kasan_slab_free_mempool(element, _RET_IP_);
 	else if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
    --
    2.29.2.454.gaff20da3a2-goog
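
The mm/mempool.c hunk dispatches on pool->alloc, so the renamed hook also covers kmem_cache-backed pools. A sketch of that case (illustrative only; the struct, cache, and pool names are made up):

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/types.h>

struct io_req {
	u64 sector;
	u32 len;
};

static struct kmem_cache *io_req_cache;
static mempool_t *io_req_pool;

static int io_req_pool_init(void)
{
	io_req_cache = KMEM_CACHE(io_req, 0);
	if (!io_req_cache)
		return -ENOMEM;

	/*
	 * pool->alloc == mempool_alloc_slab here, so kasan_poison_element()
	 * calls kasan_slab_free_mempool() for every element returned to the
	 * pool, keeping the object poisoned until it is handed out again.
	 */
	io_req_pool = mempool_create_slab_pool(8, io_req_cache);
	if (!io_req_pool) {
		kmem_cache_destroy(io_req_cache);
		return -ENOMEM;
	}
	return 0;
}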