From da844b787245194cfd69f0f1d2fb1dd3640a8a6d Mon Sep 17 00:00:00 2001
From: Andrey Konovalov
Date: Thu, 29 Apr 2021 23:00:06 -0700
Subject: kasan, mm: integrate slab init_on_alloc with HW_TAGS

This change uses the previously added memory initialization feature of
HW_TAGS KASAN routines for slab memory when init_on_alloc is enabled.

With this change, memory initialization memset() is no longer called when
both HW_TAGS KASAN and init_on_alloc are enabled. Instead, memory is
initialized in KASAN runtime.

The memory initialization memset() is moved into slab_post_alloc_hook()
that currently directly follows the initialization loop. A new argument
is added to slab_post_alloc_hook() that indicates whether to initialize
the memory or not.

To avoid discrepancies with which memory gets initialized that can be
caused by future changes, both KASAN hook and initialization memset() are
put together and a warning comment is added.

Combining setting allocation tags with memory initialization improves
HW_TAGS KASAN performance when init_on_alloc is enabled.

Link: https://lkml.kernel.org/r/c1292aeb5d519da221ec74a0684a949b027d7720.1615296150.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov
Reviewed-by: Marco Elver
Cc: Alexander Potapenko
Cc: Andrey Ryabinin
Cc: Branislav Rankov
Cc: Catalin Marinas
Cc: Christoph Lameter
Cc: David Rientjes
Cc: Dmitry Vyukov
Cc: Evgenii Stepanov
Cc: Joonsoo Kim
Cc: Kevin Brodsky
Cc: Pekka Enberg
Cc: Peter Collingbourne
Cc: Vincenzo Frascino
Cc: Vlastimil Babka
Cc: Will Deacon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/kasan/common.c |  4 ++--
 mm/slab.c         | 28 +++++++++++++---------------
 mm/slab.h         | 17 +++++++++++++----
 mm/slub.c         | 27 +++++++++++----------------
 4 files changed, 39 insertions(+), 37 deletions(-)

(limited to 'mm')

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index efe58e58cc93..ac0d4ed9c921 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -428,7 +428,7 @@ static void set_alloc_info(struct kmem_cache *cache, void *object,
 }
 
 void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
-					void *object, gfp_t flags)
+					void *object, gfp_t flags, bool init)
 {
 	u8 tag;
 	void *tagged_object;
@@ -453,7 +453,7 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 	 * Unpoison the whole object.
 	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
 	 */
-	kasan_unpoison(tagged_object, cache->object_size, false);
+	kasan_unpoison(tagged_object, cache->object_size, init);
 
 	/* Save alloc info (if possible) for non-kmalloc() allocations. */
 	if (kasan_stack_collection_enabled())
diff --git a/mm/slab.c b/mm/slab.c
index 4e212cda8693..84f183e9b31a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3216,6 +3216,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 	void *ptr;
 	int slab_node = numa_mem_id();
 	struct obj_cgroup *objcg = NULL;
+	bool init = false;
 
 	flags &= gfp_allowed_mask;
 	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
@@ -3254,12 +3255,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 out:
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
-
-	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
-		memset(ptr, 0, cachep->object_size);
+	init = slab_want_init_on_alloc(flags, cachep);
 
 out_hooks:
-	slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
+	slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init);
 	return ptr;
 }
 
@@ -3301,6 +3300,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned lo
 	unsigned long save_flags;
 	void *objp;
 	struct obj_cgroup *objcg = NULL;
+	bool init = false;
 
 	flags &= gfp_allowed_mask;
 	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
@@ -3317,12 +3317,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned lo
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 	prefetchw(objp);
-
-	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
-		memset(objp, 0, cachep->object_size);
+	init = slab_want_init_on_alloc(flags, cachep);
 
 out:
-	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
+	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init);
 	return objp;
 }
 
@@ -3542,18 +3540,18 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
 	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
 
-	/* Clear memory outside IRQ disabled section */
-	if (unlikely(slab_want_init_on_alloc(flags, s)))
-		for (i = 0; i < size; i++)
-			memset(p[i], 0, s->object_size);
-
-	slab_post_alloc_hook(s, objcg, flags, size, p);
+	/*
+	 * memcg and kmem_cache debug support and memory initialization.
+	 * Done outside of the IRQ disabled section.
+	 */
+	slab_post_alloc_hook(s, objcg, flags, size, p,
+				slab_want_init_on_alloc(flags, s));
 	/* FIXME: Trace call missing. Christoph would like a bulk variant */
 	return size;
 error:
 	local_irq_enable();
 	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
-	slab_post_alloc_hook(s, objcg, flags, i, p);
+	slab_post_alloc_hook(s, objcg, flags, i, p, false);
 	__kmem_cache_free_bulk(s, i, p);
 	return 0;
 }
diff --git a/mm/slab.h b/mm/slab.h
index c30ed35b3d5d..18c1927cd196 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -506,15 +506,24 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s,
-					struct obj_cgroup *objcg,
-					gfp_t flags, size_t size, void **p)
+					struct obj_cgroup *objcg, gfp_t flags,
+					size_t size, void **p, bool init)
 {
 	size_t i;
 
 	flags &= gfp_allowed_mask;
+
+	/*
+	 * As memory initialization might be integrated into KASAN,
+	 * kasan_slab_alloc and initialization memset must be
+	 * kept together to avoid discrepancies in behavior.
+	 *
+	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
+	 */
 	for (i = 0; i < size; i++) {
-		p[i] = kasan_slab_alloc(s, p[i], flags);
-		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
+		p[i] = kasan_slab_alloc(s, p[i], flags, init);
+		if (p[i] && init && !kasan_has_integrated_init())
+			memset(p[i], 0, s->object_size);
 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, flags);
 	}
diff --git a/mm/slub.c b/mm/slub.c
index a178c738fc92..5cf35250f20c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2823,6 +2823,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	struct page *page;
 	unsigned long tid;
 	struct obj_cgroup *objcg = NULL;
+	bool init = false;
 
 	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 	if (!s)
@@ -2900,12 +2901,10 @@ redo:
 	}
 
 	maybe_wipe_obj_freeptr(s, object);
-
-	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
-		memset(kasan_reset_tag(object), 0, s->object_size);
+	init = slab_want_init_on_alloc(gfpflags, s);
 
 out:
-	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
+	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init);
 
 	return object;
 }
@@ -3357,20 +3356,16 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
 
-	/* Clear memory outside IRQ disabled fastpath loop */
-	if (unlikely(slab_want_init_on_alloc(flags, s))) {
-		int j;
-
-		for (j = 0; j < i; j++)
-			memset(kasan_reset_tag(p[j]), 0, s->object_size);
-	}
-
-	/* memcg and kmem_cache debug support */
-	slab_post_alloc_hook(s, objcg, flags, size, p);
+	/*
+	 * memcg and kmem_cache debug support and memory initialization.
+	 * Done outside of the IRQ disabled fastpath loop.
+	 */
+	slab_post_alloc_hook(s, objcg, flags, size, p,
+				slab_want_init_on_alloc(flags, s));
 	return i;
 error:
 	local_irq_enable();
-	slab_post_alloc_hook(s, objcg, flags, i, p);
+	slab_post_alloc_hook(s, objcg, flags, i, p, false);
 	__kmem_cache_free_bulk(s, i, p);
 	return 0;
 }
@@ -3580,7 +3575,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
-	n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL);
+	n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
 	page->freelist = get_freepointer(kmem_cache_node, n);
 	page->inuse = 1;
 	page->frozen = 0;
--
cgit v1.2.3
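For readers following the logic outside the kernel tree, below is a small standalone C sketch of the pattern the patch establishes in slab_post_alloc_hook(): the allocation hook receives an init flag, and the explicit memset runs only when the (modeled) tagging runtime has not already initialized the object. This is an illustration only, not part of the patch; every identifier in it (fake_kasan_integrated_init, fake_kasan_alloc_hook, post_alloc_hook) is invented and does not exist in the kernel. Only the shape of the control flow mirrors the change above.

/*
 * Standalone illustration only -- not kernel code and not part of the
 * patch above. All names here are made up.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Models kasan_has_integrated_init(): true would correspond to HW_TAGS. */
static bool fake_kasan_integrated_init;

/* Models kasan_slab_alloc(): with integrated init, the hook zeroes memory. */
static void *fake_kasan_alloc_hook(void *obj, size_t size, bool init)
{
	if (obj && init && fake_kasan_integrated_init)
		memset(obj, 0, size);	/* "initialized in KASAN runtime" */
	return obj;
}

/* Mirrors the shape of slab_post_alloc_hook() after this patch. */
static void post_alloc_hook(void **p, size_t count, size_t size, bool init)
{
	size_t i;

	/* Keep the hook and the memset together, as the patch comment asks. */
	for (i = 0; i < count; i++) {
		p[i] = fake_kasan_alloc_hook(p[i], size, init);
		if (p[i] && init && !fake_kasan_integrated_init)
			memset(p[i], 0, size);
	}
}

int main(void)
{
	void *objs[2] = { malloc(32), malloc(32) };

	/* init == true plays the role of init_on_alloc. */
	post_alloc_hook(objs, 2, 32, true);
	if (objs[0])
		printf("first byte of objs[0]: %u\n",
		       ((unsigned char *)objs[0])[0]);

	free(objs[0]);
	free(objs[1]);
	return 0;
}

Flipping fake_kasan_integrated_init to true moves the zeroing into the hook, so the memset in the loop never runs; that is the redundant work the commit message describes eliminating when HW_TAGS KASAN and init_on_alloc are both enabled.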