Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/mm/dump_pagetables.c  17
-rw-r--r--  arch/s390/mm/init.c              4
-rw-r--r--  arch/s390/mm/kasan_init.c       33
3 files changed, 31 insertions, 23 deletions
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 363f6470d742..3b93ba0b5d8d 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -111,11 +111,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
}
#ifdef CONFIG_KASAN
-static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st)
+static void note_kasan_early_shadow_page(struct seq_file *m,
+ struct pg_state *st)
{
unsigned int prot;
- prot = pte_val(*kasan_zero_pte) &
+ prot = pte_val(*kasan_early_shadow_pte) &
(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
note_page(m, st, prot, 4);
}
@@ -154,8 +155,8 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
int i;
#ifdef CONFIG_KASAN
- if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) {
- note_kasan_zero_page(m, st);
+ if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_early_shadow_pmd)) {
+ note_kasan_early_shadow_page(m, st);
return;
}
#endif
@@ -185,8 +186,8 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
int i;
#ifdef CONFIG_KASAN
- if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) {
- note_kasan_zero_page(m, st);
+ if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_early_shadow_pud)) {
+ note_kasan_early_shadow_page(m, st);
return;
}
#endif
@@ -215,8 +216,8 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
int i;
#ifdef CONFIG_KASAN
- if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) {
- note_kasan_zero_page(m, st);
+ if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_early_shadow_p4d)) {
+ note_kasan_early_shadow_page(m, st);
return;
}
#endif
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 76d0708438e9..3e82f66d5c61 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -59,7 +59,7 @@ static void __init setup_zero_pages(void)
order = 7;
/* Limit number of empty zero pages for small memory sizes */
- while (order > 2 && (totalram_pages >> 10) < (1UL << order))
+ while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
order--;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
@@ -242,7 +242,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
{
/*
* There is no hardware or firmware interface which could trigger a
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index acb9645b762b..bac5c27d11fc 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -107,7 +107,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, PGDIR_SIZE) &&
end - address >= PGDIR_SIZE) {
- pgd_populate(&init_mm, pg_dir, kasan_zero_p4d);
+ pgd_populate(&init_mm, pg_dir,
+ kasan_early_shadow_p4d);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
continue;
}
@@ -120,7 +121,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, P4D_SIZE) &&
end - address >= P4D_SIZE) {
- p4d_populate(&init_mm, p4_dir, kasan_zero_pud);
+ p4d_populate(&init_mm, p4_dir,
+ kasan_early_shadow_pud);
address = (address + P4D_SIZE) & P4D_MASK;
continue;
}
@@ -133,7 +135,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, PUD_SIZE) &&
end - address >= PUD_SIZE) {
- pud_populate(&init_mm, pu_dir, kasan_zero_pmd);
+ pud_populate(&init_mm, pu_dir,
+ kasan_early_shadow_pmd);
address = (address + PUD_SIZE) & PUD_MASK;
continue;
}
@@ -146,7 +149,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, PMD_SIZE) &&
end - address >= PMD_SIZE) {
- pmd_populate(&init_mm, pm_dir, kasan_zero_pte);
+ pmd_populate(&init_mm, pm_dir,
+ kasan_early_shadow_pte);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
@@ -188,7 +192,7 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
pte_val(*pt_dir) = __pa(page) | pgt_prot;
break;
case POPULATE_ZERO_SHADOW:
- page = kasan_zero_page;
+ page = kasan_early_shadow_page;
pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
break;
}
@@ -256,14 +260,14 @@ void __init kasan_early_init(void)
unsigned long vmax;
unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
pte_t pte_z;
- pmd_t pmd_z = __pmd(__pa(kasan_zero_pte) | _SEGMENT_ENTRY);
- pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY);
- p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY);
+ pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
+ pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
+ p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
kasan_early_detect_facilities();
if (!has_nx)
pgt_prot &= ~_PAGE_NOEXEC;
- pte_z = __pte(__pa(kasan_zero_page) | pgt_prot);
+ pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);
memsize = get_mem_detect_end();
if (!memsize)
@@ -292,10 +296,13 @@ void __init kasan_early_init(void)
}
/* init kasan zero shadow */
- crst_table_init((unsigned long *)kasan_zero_p4d, p4d_val(p4d_z));
- crst_table_init((unsigned long *)kasan_zero_pud, pud_val(pud_z));
- crst_table_init((unsigned long *)kasan_zero_pmd, pmd_val(pmd_z));
- memset64((u64 *)kasan_zero_pte, pte_val(pte_z), PTRS_PER_PTE);
+ crst_table_init((unsigned long *)kasan_early_shadow_p4d,
+ p4d_val(p4d_z));
+ crst_table_init((unsigned long *)kasan_early_shadow_pud,
+ pud_val(pud_z));
+ crst_table_init((unsigned long *)kasan_early_shadow_pmd,
+ pmd_val(pmd_z));
+ memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);