Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	41
1 file changed, 31 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 342cabf912a7..6314de5387f5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -397,14 +397,14 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
  * initialization is done, but this is not likely to happen.
  */
 static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-						fpi_t fpi_flags)
+						bool init, fpi_t fpi_flags)
 {
 	if (static_branch_unlikely(&deferred_pages))
 		return;
 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
 			(fpi_flags & FPI_SKIP_KASAN_POISON))
 		return;
-	kasan_free_pages(page, order);
+	kasan_free_pages(page, order, init);
 }
 
 /* Returns true if the struct page for the pfn is uninitialised */
@@ -456,12 +456,12 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 }
 #else
 static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-						fpi_t fpi_flags)
+						bool init, fpi_t fpi_flags)
 {
 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
 			(fpi_flags & FPI_SKIP_KASAN_POISON))
 		return;
-	kasan_free_pages(page, order);
+	kasan_free_pages(page, order, init);
 }
 
 static inline bool early_page_uninitialised(unsigned long pfn)
@@ -1243,6 +1243,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 			unsigned int order, bool check_free, fpi_t fpi_flags)
 {
 	int bad = 0;
+	bool init;
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 
@@ -1300,16 +1301,21 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		debug_check_no_obj_freed(page_address(page),
 					   PAGE_SIZE << order);
 	}
-	if (want_init_on_free())
-		kernel_init_free_pages(page, 1 << order);
 
 	kernel_poison_pages(page, 1 << order);
 
 	/*
+	 * As memory initialization might be integrated into KASAN,
+	 * kasan_free_pages and kernel_init_free_pages must be
+	 * kept together to avoid discrepancies in behavior.
+	 *
 	 * With hardware tag-based KASAN, memory tags must be set before the
 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
 	 */
-	kasan_free_nondeferred_pages(page, order, fpi_flags);
+	init = want_init_on_free();
+	if (init && !kasan_has_integrated_init())
+		kernel_init_free_pages(page, 1 << order);
+	kasan_free_nondeferred_pages(page, order, init, fpi_flags);
 
 	/*
 	 * arch_free_page() can make the page's contents inaccessible.  s390
@@ -2316,17 +2322,32 @@ static bool check_new_pages(struct page *page, unsigned int order)
 inline void post_alloc_hook(struct page *page, unsigned int order,
 				gfp_t gfp_flags)
 {
+	bool init;
+
 	set_page_private(page, 0);
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
 	debug_pagealloc_map_pages(page, 1 << order);
-	kasan_alloc_pages(page, order);
+
+	/*
+	 * Page unpoisoning must happen before memory initialization.
+	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
+	 * allocations and the page unpoisoning code will complain.
+	 */
 	kernel_unpoison_pages(page, 1 << order);
-	set_page_owner(page, order, gfp_flags);
 
-	if (!want_init_on_free() && want_init_on_alloc(gfp_flags))
+	/*
+	 * As memory initialization might be integrated into KASAN,
+	 * kasan_alloc_pages and kernel_init_free_pages must be
+	 * kept together to avoid discrepancies in behavior.
+	 */
+	init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+	kasan_alloc_pages(page, order, init);
+	if (init && !kasan_has_integrated_init())
 		kernel_init_free_pages(page, 1 << order);
+
+	set_page_owner(page, order, gfp_flags);
 }
 
 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
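
The gist of the change: the decision to initialize page memory is computed once into an `init` flag and passed down into KASAN. With hardware tag-based KASAN, which can zero memory in the same pass that sets memory tags (kasan_has_integrated_init() returns true), the separate kernel_init_free_pages() call is skipped; otherwise the open-coded clear still runs. Below is a minimal standalone C sketch of that gating logic; the model_* helpers and the stubbed predicate values are illustrative placeholders, not the kernel's real implementations.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stubbed predicates; real values depend on kernel config and boot args. */
static bool kasan_has_integrated_init(void) { return true; } /* HW_TAGS mode */
static bool want_init_on_free(void) { return true; }         /* init_on_free=1 */

/* Models kasan_free_pages(page, order, init): with integrated init,
 * zeroing happens in the same pass that (conceptually) sets the tags. */
static void model_kasan_free(char *mem, size_t size, bool init)
{
	if (init && kasan_has_integrated_init())
		memset(mem, 0, size);
}

/* Models the free_pages_prepare() hunk above: compute init once, run the
 * open-coded clear only when KASAN cannot fold it in, then pass KASAN the
 * same decision so the two paths never disagree. */
static void model_free_path(char *mem, size_t size)
{
	bool init = want_init_on_free();

	if (init && !kasan_has_integrated_init())
		memset(mem, 0, size); /* kernel_init_free_pages() analogue */
	model_kasan_free(mem, size, init);
}

int main(void)
{
	char page[64];

	memset(page, 0xAA, sizeof(page)); /* stale contents */
	model_free_path(page, sizeof(page));
	printf("byte after free path: 0x%02x\n", (unsigned char)page[0]);
	return 0; /* prints 0x00: the memory was cleared exactly once */
}

Whichever branch runs, the buffer is cleared exactly once. Computing the single `init` decision and handing it to kasan_free_pages()/kasan_alloc_pages() is what lets the patch drop the unconditional want_init_on_free() clear without changing observable behavior, which is also why the comments insist the two calls be kept together.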