author     Vasily Gorbik <gor@linux.ibm.com>    2019-08-02 12:42:59 +0200
committer  Vasily Gorbik <gor@linux.ibm.com>    2019-12-11 19:56:59 +0100
commit     3e39ce266a273e5751e5792aee248cb8d9630c8b
tree       729b1ac0d497523ab830af0ac974126b1e68d0cf /arch/s390/mm
parent     1b68ac8678a8e9993deebd55014cbe803e78ca02
s390/kasan: add KASAN_VMALLOC support
Add KASAN_VMALLOC support, which enables access checks for the vmalloc
memory area and also allows VMAP_STACK to be used together with KASAN.

KASAN_VMALLOC changes how shadow memory for the vmalloc and modules
areas is handled: only the top-level page tables are pre-populated, and
the lower levels are filled dynamically upon memory allocation.
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
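The commit message above describes "shallow" population: for the vmalloc and
modules shadow, only the top level of the page-table hierarchy is set up at
boot, and the lower levels are instantiated when memory is actually allocated
there. The toy model below illustrates that idea in plain userspace C. All
names here (top_table, mid_table, shallow_populate, populate_on_alloc) and the
table sizes are hypothetical stand-ins, not the kernel's data structures; the
real implementation is in the kasan_init.c hunks further down.

/*
 * Userspace toy model of the "shallow populate" idea (NOT kernel code):
 * a small multi-level table where only the level below the top is filled
 * up front and leaf shadow pages are allocated on demand.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOP_ENTRIES 4   /* hypothetical table sizes, for illustration only */
#define MID_ENTRIES 4

struct mid_table { unsigned char *shadow_page[MID_ENTRIES]; };
struct top_table { struct mid_table *mid[TOP_ENTRIES]; };

/* "Shallow populate": allocate only the level directly below the top. */
static void shallow_populate(struct top_table *top, int first, int last)
{
        for (int i = first; i <= last; i++)
                if (!top->mid[i])
                        top->mid[i] = calloc(1, sizeof(struct mid_table));
}

/* Later, when memory is really allocated, fill in the missing leaf level. */
static void populate_on_alloc(struct top_table *top, int t, int m)
{
        if (top->mid[t] && !top->mid[t]->shadow_page[m])
                top->mid[t]->shadow_page[m] = calloc(1, 4096);
}

int main(void)
{
        struct top_table top = { { NULL } };

        shallow_populate(&top, 2, 3);   /* e.g. the vmalloc + modules slots */
        populate_on_alloc(&top, 2, 1);  /* first allocation in that range */

        for (int t = 0; t < TOP_ENTRIES; t++) {
                if (!top.mid[t]) {
                        printf("top[%d]: not populated\n", t);
                        continue;
                }
                for (int m = 0; m < MID_ENTRIES; m++)
                        printf("top[%d].mid[%d]: %s\n", t, m,
                               top.mid[t]->shadow_page[m] ? "shadow mapped"
                                                          : "filled on demand");
        }
        return 0;
}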
Diffstat (limited to 'arch/s390/mm')
 -rw-r--r--  arch/s390/mm/kasan_init.c | 68
 1 file changed, 56 insertions(+), 12 deletions(-)
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index 460f25572940..06345616a646 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -82,7 +82,8 @@ static pte_t * __init kasan_early_pte_alloc(void)
 enum populate_mode {
         POPULATE_ONE2ONE,
         POPULATE_MAP,
-        POPULATE_ZERO_SHADOW
+        POPULATE_ZERO_SHADOW,
+        POPULATE_SHALLOW
 };
 static void __init kasan_early_vmemmap_populate(unsigned long address,
                                                 unsigned long end,
@@ -116,6 +117,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                         pgd_populate(&init_mm, pg_dir, p4_dir);
                 }
 
+                if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
+                    mode == POPULATE_SHALLOW) {
+                        address = (address + P4D_SIZE) & P4D_MASK;
+                        continue;
+                }
+
                 p4_dir = p4d_offset(pg_dir, address);
                 if (p4d_none(*p4_dir)) {
                         if (mode == POPULATE_ZERO_SHADOW &&
@@ -130,6 +137,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                         p4d_populate(&init_mm, p4_dir, pu_dir);
                 }
 
+                if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
+                    mode == POPULATE_SHALLOW) {
+                        address = (address + PUD_SIZE) & PUD_MASK;
+                        continue;
+                }
+
                 pu_dir = pud_offset(p4_dir, address);
                 if (pud_none(*pu_dir)) {
                         if (mode == POPULATE_ZERO_SHADOW &&
@@ -195,6 +208,9 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                                 page = kasan_early_shadow_page;
                                 pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
                                 break;
+                        case POPULATE_SHALLOW:
+                                /* should never happen */
+                                break;
                         }
                 }
                 address += PAGE_SIZE;
@@ -313,22 +329,50 @@ void __init kasan_early_init(void)
         init_mm.pgd = early_pg_dir;
         /*
          * Current memory layout:
-         * +- 0 -------------+   +- shadow start -+
-         * | 1:1 ram mapping |  /| 1/8 ram        |
-         * +- end of ram ----+ / +----------------+
-         * | ... gap ...     |/  |     kasan      |
-         * +- shadow start --+   |     zero       |
-         * | 1/8 addr space  |   |     page       |
-         * +- shadow end    -+   |     mapping    |
-         * | ... gap ...     |\  |  (untracked)   |
-         * +- modules vaddr -+ \ +----------------+
-         * | 2Gb             |  \|    unmapped    | allocated per module
-         * +-----------------+   +- shadow end ---+
+         * +- 0 -------------+     +- shadow start -+
+         * | 1:1 ram mapping |    /| 1/8 ram        |
+         * |                 |   / |                |
+         * +- end of ram ----+  /  +----------------+
+         * | ... gap ...     | /   |                |
+         * |                 |/    |     kasan      |
+         * +- shadow start --+     |     zero       |
+         * | 1/8 addr space  |     |     page       |
+         * +- shadow end    -+     |     mapping    |
+         * | ... gap ...     |\    |  (untracked)   |
+         * +- vmalloc area  -+ \   |                |
+         * | vmalloc_size    |  \  |                |
+         * +- modules vaddr -+   \ +----------------+
+         * | 2Gb             |    \|    unmapped    | allocated per module
+         * +-----------------+     +- shadow end ---+
+         *
+         * Current memory layout (KASAN_VMALLOC):
+         * +- 0 -------------+     +- shadow start -+
+         * | 1:1 ram mapping |    /| 1/8 ram        |
+         * |                 |   / |                |
+         * +- end of ram ----+  /  +----------------+
+         * | ... gap ...     | /   |     kasan      |
+         * |                 |/    |     zero       |
+         * +- shadow start --+     |     page       |
+         * | 1/8 addr space  |     |     mapping    |
+         * +- shadow end    -+     |  (untracked)   |
+         * | ... gap ...     |\    |                |
+         * +- vmalloc area  -+ \   +- vmalloc area -+
+         * | vmalloc_size    |  \  |shallow populate|
+         * +- modules vaddr -+   \ +- modules area -+
+         * | 2Gb             |    \|shallow populate|
+         * +-----------------+     +- shadow end ---+
          */
         /* populate kasan shadow (for identity mapping and zero page mapping) */
         kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
         if (IS_ENABLED(CONFIG_MODULES))
                 untracked_mem_end = vmax - MODULES_LEN;
+        if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+                untracked_mem_end = vmax - vmalloc_size - MODULES_LEN;
+                /* shallowly populate kasan shadow for vmalloc and modules */
+                kasan_early_vmemmap_populate(__sha(untracked_mem_end),
+                                             __sha(vmax), POPULATE_SHALLOW);
+        }
+
         /* populate kasan shadow for untracked memory */
         kasan_early_vmemmap_populate(__sha(max_physmem_end),
                                      __sha(untracked_mem_end),
                                      POPULATE_ZERO_SHADOW);
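For reference, the "1/8 ram" and "1/8 addr space" labels in the layout diagrams
come from KASAN's generic shadow mapping, where every 8 bytes of address space
are tracked by one shadow byte. The standalone sketch below shows that
arithmetic; KASAN_SHADOW_SCALE_SHIFT = 3 is the generic KASAN scale, while
EXAMPLE_SHADOW_OFFSET is a made-up value for illustration, not the real s390
shadow offset.

/*
 * Sketch of KASAN's address-to-shadow arithmetic: each 8 bytes of memory
 * are tracked by 1 shadow byte. The offset is a hypothetical example.
 */
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3                /* 1 shadow byte per 8 bytes */
#define EXAMPLE_SHADOW_OFFSET 0x30000000000000UL  /* hypothetical offset */

static unsigned long mem_to_shadow(unsigned long addr)
{
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + EXAMPLE_SHADOW_OFFSET;
}

int main(void)
{
        unsigned long start = 0x10000000UL;
        unsigned long size = 1UL << 20;           /* 1 MiB of tracked memory */

        printf("shadow of [%#lx, %#lx) -> [%#lx, %#lx), %lu KiB of shadow\n",
               start, start + size,
               mem_to_shadow(start), mem_to_shadow(start + size),
               (size >> KASAN_SHADOW_SCALE_SHIFT) >> 10);
        return 0;
}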