-rw-r--r--  include/linux/kasan.h | 30 ++++++++++++++++++++++--------
-rw-r--r--  mm/kasan/common.c     |  8 ++++----
-rw-r--r--  mm/mempool.c          |  4 ++--
-rw-r--r--  mm/page_alloc.c       | 41 +++++++++++++++++++++++++++++++----------
4 files changed, 59 insertions(+), 24 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 9f5faefd1744..30aa2bee8400 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -96,6 +96,11 @@ static __always_inline bool kasan_enabled(void)
 	return static_branch_likely(&kasan_flag_enabled);
 }
 
+static inline bool kasan_has_integrated_init(void)
+{
+	return kasan_enabled();
+}
+
 #else /* CONFIG_KASAN_HW_TAGS */
 
 static inline bool kasan_enabled(void)
@@ -103,6 +108,11 @@ static inline bool kasan_enabled(void)
 	return true;
 }
 
+static inline bool kasan_has_integrated_init(void)
+{
+	return false;
+}
+
 #endif /* CONFIG_KASAN_HW_TAGS */
 
 slab_flags_t __kasan_never_merge(void);
@@ -120,20 +130,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
 		__kasan_unpoison_range(addr, size);
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order);
+void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
 static __always_inline void kasan_alloc_pages(struct page *page,
-						unsigned int order)
+						unsigned int order, bool init)
 {
 	if (kasan_enabled())
-		__kasan_alloc_pages(page, order);
+		__kasan_alloc_pages(page, order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order);
+void __kasan_free_pages(struct page *page, unsigned int order, bool init);
 static __always_inline void kasan_free_pages(struct page *page,
-						unsigned int order)
+						unsigned int order, bool init)
 {
 	if (kasan_enabled())
-		__kasan_free_pages(page, order);
+		__kasan_free_pages(page, order, init);
 }
 
 void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -277,13 +287,17 @@ static inline bool kasan_enabled(void)
 {
 	return false;
 }
+static inline bool kasan_has_integrated_init(void)
+{
+	return false;
+}
 static inline slab_flags_t kasan_never_merge(void)
 {
 	return 0;
 }
 static inline void kasan_unpoison_range(const void *address, size_t size) {}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
 static inline void kasan_cache_create(struct kmem_cache *cache,
 				      unsigned int *size,
 				      slab_flags_t *flags) {}
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index b3d8eb154f59..efe58e58cc93 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
 	return 0;
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order)
+void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
 {
 	u8 tag;
 	unsigned long i;
@@ -108,14 +108,14 @@ void __kasan_alloc_pages(struct page *page, unsigned int order)
 	tag = kasan_random_tag();
 	for (i = 0; i < (1 << order); i++)
 		page_kasan_tag_set(page + i, tag);
-	kasan_unpoison(page_address(page), PAGE_SIZE << order, false);
+	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order)
+void __kasan_free_pages(struct page *page, unsigned int order, bool init)
 {
 	if (likely(!PageHighMem(page)))
 		kasan_poison(page_address(page), PAGE_SIZE << order,
-			     KASAN_FREE_PAGE, false);
+			     KASAN_FREE_PAGE, init);
 }
 
 /*
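The interface change above is easiest to see in isolation: with HW_TAGS KASAN, the pass that assigns memory tags can zero the memory at the same time, so callers now hand their init decision down instead of doing a separate memset. The following userspace sketch models that contract; the hw_tags toggle and all function names are illustrative stand-ins, not the kernel API.

/* Userspace model of the integrated-init contract (illustrative only). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool hw_tags = true;	/* stands in for CONFIG_KASAN_HW_TAGS being active */

/* Mirrors kasan_has_integrated_init(): only the HW_TAGS mode can
 * initialize memory as part of setting memory tags. */
static bool has_integrated_init(void)
{
	return hw_tags;
}

/* Models kasan_alloc_pages(page, order, init): under HW_TAGS the
 * unpoison/tagging pass honors init itself. */
static void kasan_alloc_model(char *mem, size_t size, bool init)
{
	if (!hw_tags)
		return;		/* generic/SW_TAGS: no init work done here */
	/* the real code assigns a random tag here; zeroing rides along */
	if (init)
		memset(mem, 0, size);
}

/* Models the caller's side: zero separately only when KASAN did not. */
static void post_alloc_model(char *mem, size_t size, bool init)
{
	kasan_alloc_model(mem, size, init);
	if (init && !has_integrated_init())
		memset(mem, 0, size);	/* kernel_init_free_pages() stand-in */
}

int main(void)
{
	char page[64];

	memset(page, 0xaa, sizeof(page));
	post_alloc_model(page, sizeof(page), true);
	printf("page[0] = %d\n", page[0]);	/* 0 whether hw_tags is on or off */
	return 0;
}

Either way the page ends up zeroed exactly once; the patch's point is choosing which component does the zeroing.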
diff --git a/mm/mempool.c b/mm/mempool.c
index 79959fac27d7..fe19d290a301 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -106,7 +106,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_slab_free_mempool(element);
 	else if (pool->alloc == mempool_alloc_pages)
-		kasan_free_pages(element, (unsigned long)pool->pool_data);
+		kasan_free_pages(element, (unsigned long)pool->pool_data, false);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
@@ -114,7 +114,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_unpoison_range(element, __ksize(element));
 	else if (pool->alloc == mempool_alloc_pages)
-		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+		kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
 }
 
 static __always_inline void add_element(mempool_t *pool, void *element)
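Both mempool call sites pass init = false deliberately: an element parked in a pool is poisoned only to make it inaccessible, and its contents must survive the round-trip, since it was never actually returned to the page allocator. A toy model of that round-trip, assuming poisoning merely toggles an accessibility flag (all names are hypothetical):

#include <assert.h>
#include <stdbool.h>
#include <string.h>

/* Toy model of a mempool element round-trip (illustrative only). */
struct element {
	char data[32];
	bool accessible;	/* models the KASAN poison state */
};

static void model_kasan_free_pages(struct element *e, bool init)
{
	e->accessible = false;			/* poison */
	if (init)
		memset(e->data, 0, sizeof(e->data));
}

static void model_kasan_alloc_pages(struct element *e, bool init)
{
	e->accessible = true;			/* unpoison */
	if (init)
		memset(e->data, 0, sizeof(e->data));
}

int main(void)
{
	struct element e = { .accessible = true };

	strcpy(e.data, "cached state");
	/* mempool_free() -> kasan_poison_element(): init == false */
	model_kasan_free_pages(&e, false);
	/* mempool_alloc() -> kasan_unpoison_element(): init == false */
	model_kasan_alloc_pages(&e, false);
	/* contents survive precisely because the mempool hooks opt out */
	assert(strcmp(e.data, "cached state") == 0);
	return 0;
}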
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 342cabf912a7..6314de5387f5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -397,14 +397,14 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
  * initialization is done, but this is not likely to happen.
  */
 static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-						fpi_t fpi_flags)
+						bool init, fpi_t fpi_flags)
 {
 	if (static_branch_unlikely(&deferred_pages))
 		return;
 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
 			(fpi_flags & FPI_SKIP_KASAN_POISON))
 		return;
-	kasan_free_pages(page, order);
+	kasan_free_pages(page, order, init);
 }
 
 /* Returns true if the struct page for the pfn is uninitialised */
@@ -456,12 +456,12 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 }
 #else
 static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-						fpi_t fpi_flags)
+						bool init, fpi_t fpi_flags)
 {
 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
 			(fpi_flags & FPI_SKIP_KASAN_POISON))
 		return;
-	kasan_free_pages(page, order);
+	kasan_free_pages(page, order, init);
 }
 
 static inline bool early_page_uninitialised(unsigned long pfn)
@@ -1243,6 +1243,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 			unsigned int order, bool check_free, fpi_t fpi_flags)
 {
 	int bad = 0;
+	bool init;
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 
@@ -1300,16 +1301,21 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		debug_check_no_obj_freed(page_address(page),
 					   PAGE_SIZE << order);
 	}
-	if (want_init_on_free())
-		kernel_init_free_pages(page, 1 << order);
 
 	kernel_poison_pages(page, 1 << order);
 
 	/*
+	 * As memory initialization might be integrated into KASAN,
+	 * kasan_free_pages and kernel_init_free_pages must be
+	 * kept together to avoid discrepancies in behavior.
+	 *
 	 * With hardware tag-based KASAN, memory tags must be set before the
 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
 	 */
-	kasan_free_nondeferred_pages(page, order, fpi_flags);
+	init = want_init_on_free();
+	if (init && !kasan_has_integrated_init())
+		kernel_init_free_pages(page, 1 << order);
+	kasan_free_nondeferred_pages(page, order, init, fpi_flags);
 
 	/*
 	 * arch_free_page() can make the page's contents inaccessible. s390
@@ -2316,17 +2322,32 @@ static bool check_new_pages(struct page *page, unsigned int order)
 inline void post_alloc_hook(struct page *page, unsigned int order,
 				gfp_t gfp_flags)
 {
+	bool init;
+
 	set_page_private(page, 0);
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
 	debug_pagealloc_map_pages(page, 1 << order);
-	kasan_alloc_pages(page, order);
+
+	/*
+	 * Page unpoisoning must happen before memory initialization.
+	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
+	 * allocations and the page unpoisoning code will complain.
+	 */
 	kernel_unpoison_pages(page, 1 << order);
-	set_page_owner(page, order, gfp_flags);
 
-	if (!want_init_on_free() && want_init_on_alloc(gfp_flags))
+	/*
+	 * As memory initialization might be integrated into KASAN,
+	 * kasan_alloc_pages and kernel_init_free_pages must be
+	 * kept together to avoid discrepancies in behavior.
+	 */
+	init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+	kasan_alloc_pages(page, order, init);
+	if (init && !kasan_has_integrated_init())
 		kernel_init_free_pages(page, 1 << order);
+
+	set_page_owner(page, order, gfp_flags);
 }
 
 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
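One way to read the new init logic on the two page_alloc paths: with init_on_free enabled, pages are cleared when freed, so the allocation path skips its own clearing, and a page is initialized at most once per free/alloc cycle regardless of which knobs are set. A small table generator under that reading (function names are illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* free_pages_prepare(): init = want_init_on_free() */
static bool init_at_free(bool on_free)
{
	return on_free;
}

/* post_alloc_hook(): init = !want_init_on_free() && want_init_on_alloc() */
static bool init_at_alloc(bool on_free, bool on_alloc)
{
	return !on_free && on_alloc;
}

int main(void)
{
	puts("on_free on_alloc | init@free init@alloc");
	for (int f = 0; f < 2; f++)
		for (int a = 0; a < 2; a++)
			printf("%7d %8d | %9d %10d\n", f, a,
			       init_at_free(f), init_at_alloc(f, a));
	return 0;
}

In no row do both init columns come out 1, which is exactly the double initialization the !want_init_on_free() guard avoids; and whenever either column is 1, the new code lets KASAN absorb the memset when kasan_has_integrated_init() is true.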