From: Alexander Potapenko <glider@google.com>
Date: Thu, 12 Nov 2020
Subject: Re: [PATCH v9 41/44] kasan, mm: reset tags when accessing metadata

On Tue, Nov 10, 2020 at 11:12 PM Andrey Konovalov <andreyknvl@google.com> wrote:
>
> Kernel allocator code accesses metadata of slab objects; this metadata
> may lie out of bounds of the object itself, or it may be accessed after
> the object has been freed. Such accesses trigger tag faults and lead to
> false-positive reports with hardware tag-based KASAN.
>
> Software KASAN modes disable instrumentation for allocator code via the
> KASAN_SANITIZE Makefile macro, and rely on kasan_enable/disable_current()
> annotations to make KASAN ignore reports from the code between them.
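
[ For context: both mechanisms above are existing kernel facilities. A
file is excluded from software KASAN instrumentation in its Makefile,
and reports are suppressed by wrapping an access in a disable/enable
pair, e.g.: ]

  # mm/Makefile: don't instrument slub.o under software KASAN modes
  KASAN_SANITIZE_slub.o := n

  /* reports for accesses between the pair are ignored */
  kasan_disable_current();
  memset(addr, PAGE_POISON, PAGE_SIZE);
  kasan_enable_current();
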
>
> With hardware tag-based KASAN neither of those options is available: it
> doesn't use compiler instrumentation, no tag faults are ignored, and MTE
> is disabled after the first one.
>
> Instead, reset tags when accessing metadata (currently only for SLUB).
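
[ For context: with MTE the tag lives in bits 63:56 of the pointer, and
0xff is the match-all tag that never faults. A minimal sketch of what
resetting the tag of a kernel pointer boils down to; the real
kasan_reset_tag() goes through the arch helpers, this is a hypothetical
simplification: ]

  static inline void *reset_tag_sketch(const void *addr)
  {
          /* force the top byte to the match-all tag 0xff */
          return (void *)((unsigned long)addr | (0xffUL << 56));
  }
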
>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
> Acked-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>


> ---
> Change-Id: I39f3c4d4f29299d4fbbda039bedf230db1c746fb
> ---
> mm/page_alloc.c | 4 +++-
> mm/page_poison.c | 2 +-
> mm/slub.c | 29 ++++++++++++++++-------------
> 3 files changed, 20 insertions(+), 15 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 24b45261e2bd..f1648aee8d88 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -1195,8 +1195,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
>
> /* s390's use of memset() could override KASAN redzones. */
> kasan_disable_current();
> - for (i = 0; i < numpages; i++)
> + for (i = 0; i < numpages; i++) {
> + page_kasan_tag_reset(page + i);
> clear_highpage(page + i);
> + }
> kasan_enable_current();
> }
>
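
[ Note the two different helpers in this series: kasan_reset_tag()
rewrites the tag in a pointer, while page_kasan_tag_reset() rewrites the
tag stored in page->flags, so that a following page_address() or
clear_highpage() operates on a match-all-tagged pointer. A simplified
sketch of the latter, in terms of the real page_kasan_tag_set(): ]

  /* simplified version of the <linux/mm.h> helper */
  static inline void page_kasan_tag_reset_sketch(struct page *page)
  {
          page_kasan_tag_set(page, 0xff);     /* 0xff: match-all tag */
  }
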
> diff --git a/mm/page_poison.c b/mm/page_poison.c
> index ae0482cded87..e6c994af7518 100644
> --- a/mm/page_poison.c
> +++ b/mm/page_poison.c
> @@ -53,7 +53,7 @@ static void poison_page(struct page *page)
>
> /* KASAN still think the page is in-use, so skip it. */
> kasan_disable_current();
> - memset(addr, PAGE_POISON, PAGE_SIZE);
> + memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
> kasan_enable_current();
> kunmap_atomic(addr);
> }
> diff --git a/mm/slub.c b/mm/slub.c
> index b30be2385d1c..df2fd5b57df1 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -249,7 +249,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
> {
> #ifdef CONFIG_SLAB_FREELIST_HARDENED
> /*
> - * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
> + * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
> * Normally, this doesn't cause any issues, as both set_freepointer()
> * and get_freepointer() are called with a pointer with the same tag.
> * However, there are some issues with CONFIG_SLUB_DEBUG code. For
> @@ -275,6 +275,7 @@ static inline void *freelist_dereference(const struct kmem_cache *s,
>
> static inline void *get_freepointer(struct kmem_cache *s, void *object)
> {
> + object = kasan_reset_tag(object);
> return freelist_dereference(s, object + s->offset);
> }
>
> @@ -304,6 +305,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
> BUG_ON(object == fp); /* naive detection of double free or corruption */
> #endif
>
> + freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
> *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
> }
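
[ The freelist pointer is itself metadata kept inside the object, so
once the object is freed and its memory retagged, a load or store
through a stale-tagged pointer would fault under MTE. An illustration,
with a made-up tag value: ]

  void *obj = kmem_cache_alloc(s, GFP_KERNEL);  /* pointer tag, say, 0x3 */
  kmem_cache_free(s, obj);                      /* memory gets a new tag */
  /* SLUB now stores the freelist pointer at obj + s->offset, inside the
   * freed object; accessing it through the stale 0x3-tagged pointer
   * would be a tag check fault, hence the kasan_reset_tag() above. */
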
>
> @@ -538,8 +540,8 @@ static void print_section(char *level, char *text, u8 *addr,
> unsigned int length)
> {
> metadata_access_enable();
> - print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
> - length, 1);
> + print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
> + 16, 1, addr, length, 1);
> metadata_access_disable();
> }
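
[ For reference, metadata_access_enable()/metadata_access_disable() are
SLUB's thin wrappers around the kasan_*_current() annotations, which
only silence the software modes; that is why the hardware mode still
needs explicit kasan_reset_tag() calls inside these sections. Roughly: ]

  /* mm/slub.c wrappers, paraphrased */
  static inline void metadata_access_enable(void)
  {
          kasan_disable_current();
  }

  static inline void metadata_access_disable(void)
  {
          kasan_enable_current();
  }
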
>
> @@ -570,7 +572,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
>
> p = object + get_info_end(s);
>
> - return p + alloc;
> + return kasan_reset_tag(p + alloc);
> }
>
> static void set_track(struct kmem_cache *s, void *object,
> @@ -583,7 +585,8 @@ static void set_track(struct kmem_cache *s, void *object,
> unsigned int nr_entries;
>
> metadata_access_enable();
> - nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
> + nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
> + TRACK_ADDRS_COUNT, 3);
> metadata_access_disable();
>
> if (nr_entries < TRACK_ADDRS_COUNT)
> @@ -747,7 +750,7 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
>
> static void init_object(struct kmem_cache *s, void *object, u8 val)
> {
> - u8 *p = object;
> + u8 *p = kasan_reset_tag(object);
>
> if (s->flags & SLAB_RED_ZONE)
> memset(p - s->red_left_pad, val, s->red_left_pad);
> @@ -777,7 +780,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
> u8 *addr = page_address(page);
>
> metadata_access_enable();
> - fault = memchr_inv(start, value, bytes);
> + fault = memchr_inv(kasan_reset_tag(start), value, bytes);
> metadata_access_disable();
> if (!fault)
> return 1;
> @@ -873,7 +876,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
>
> pad = end - remainder;
> metadata_access_enable();
> - fault = memchr_inv(pad, POISON_INUSE, remainder);
> + fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
> metadata_access_disable();
> if (!fault)
> return 1;
> @@ -1118,7 +1121,7 @@ void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
> return;
>
> metadata_access_enable();
> - memset(addr, POISON_INUSE, page_size(page));
> + memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
> metadata_access_disable();
> }
>
> @@ -1566,10 +1569,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
> * Clear the object and the metadata, but don't touch
> * the redzone.
> */
> - memset(object, 0, s->object_size);
> + memset(kasan_reset_tag(object), 0, s->object_size);
> rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
> : 0;
> - memset((char *)object + s->inuse, 0,
> + memset((char *)kasan_reset_tag(object) + s->inuse, 0,
> s->size - s->inuse - rsize);
>
> }
> @@ -2883,10 +2886,10 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
> stat(s, ALLOC_FASTPATH);
> }
>
> - maybe_wipe_obj_freeptr(s, object);
> + maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
>
> if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
> - memset(object, 0, s->object_size);
> + memset(kasan_reset_tag(object), 0, s->object_size);
>
> slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
>
> --
> 2.29.2.222.g5d2a92d10f8-goog
>


--
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Managing Directors: Paul Manicle, Halimah DeLaine Prado
Registration court and number: Hamburg, HRB 86891
Registered office: Hamburg
