author     Reza Arbab <arbab@linux.vnet.ibm.com>      2017-01-16 13:07:46 -0600
committer  Michael Ellerman <mpe@ellerman.id.au>      2017-01-31 13:54:20 +1100
commit     0d0a4bc2a6f7de19cb0256a55891955961d70b1c (patch)
tree       264459907cfc4dcafc916968264325f6a36cc440
parent     4b5d62ca17a1cd2ffc8399e1d1c3ebbabf16e78f (diff)
powerpc/mm: unstub radix__vmemmap_remove_mapping()
Use remove_pagetable() and friends for radix vmemmap removal.

We do not require the special-case handling of vmemmap done in the x86
versions of these functions. This is because vmemmap_free() has already
freed the mapped pages, and calls us with an aligned address range. So,
add a few failsafe WARNs, but otherwise the code to remove physical
mappings is already sufficient for vmemmap.

Signed-off-by: Reza Arbab <arbab@linux.vnet.ibm.com>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
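The failsafe added at each page-table level follows the same pattern: refuse to
clear a huge entry unless both ends of the range are aligned to that level's
mapping size. The following is a minimal, standalone userspace sketch of that
check; IS_ALIGNED() is reimplemented with the same semantics as the kernel
macro, and the addresses and PMD_SIZE value are made-up examples, not values
from the patch:

#include <stdio.h>
#include <stdint.h>

/* Same semantics as the kernel's IS_ALIGNED(): true if x is a
 * multiple of the power-of-two alignment a. */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Illustrative size only; the real value depends on the radix
 * page table geometry. */
#define PMD_SIZE (2UL << 20) /* 2M */

int main(void)
{
        uint64_t addr = 0x40000000UL;          /* PMD-aligned */
        uint64_t next = 0x40000000UL + 0x10000; /* not PMD-aligned */

        if (!IS_ALIGNED(addr, PMD_SIZE) || !IS_ALIGNED(next, PMD_SIZE)) {
                /* The patch uses WARN_ONCE() here and skips the entry
                 * rather than clearing a partially covered mapping. */
                printf("unaligned range, skipping\n");
                return 0;
        }
        printf("aligned, safe to clear the huge entry\n");
        return 0;
}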
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c | 29 ++++++++++++++++++++++++++++-
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index aef9d49f70ce..30374586e01d 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -527,6 +527,15 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
 		if (!pte_present(*pte))
 			continue;
 
+		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
+			/*
+			 * The vmemmap_free() and remove_section_mapping()
+			 * codepaths call us with aligned addresses.
+			 */
+			WARN_ONCE(1, "%s: unaligned range\n", __func__);
+			continue;
+		}
+
 		pte_clear(&init_mm, addr, pte);
 	}
 }
@@ -546,6 +555,12 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
 			continue;
 
 		if (pmd_huge(*pmd)) {
+			if (!IS_ALIGNED(addr, PMD_SIZE) ||
+			    !IS_ALIGNED(next, PMD_SIZE)) {
+				WARN_ONCE(1, "%s: unaligned range\n", __func__);
+				continue;
+			}
+
 			pte_clear(&init_mm, addr, (pte_t *)pmd);
 			continue;
 		}
@@ -571,6 +586,12 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
 			continue;
 
 		if (pud_huge(*pud)) {
+			if (!IS_ALIGNED(addr, PUD_SIZE) ||
+			    !IS_ALIGNED(next, PUD_SIZE)) {
+				WARN_ONCE(1, "%s: unaligned range\n", __func__);
+				continue;
+			}
+
 			pte_clear(&init_mm, addr, (pte_t *)pud);
 			continue;
 		}
@@ -597,6 +618,12 @@ static void remove_pagetable(unsigned long start, unsigned long end)
 			continue;
 
 		if (pgd_huge(*pgd)) {
+			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
+			    !IS_ALIGNED(next, PGDIR_SIZE)) {
+				WARN_ONCE(1, "%s: unaligned range\n", __func__);
+				continue;
+			}
+
 			pte_clear(&init_mm, addr, (pte_t *)pgd);
 			continue;
 		}
@@ -636,7 +663,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 #ifdef CONFIG_MEMORY_HOTPLUG
 void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
 {
-	/* FIXME!! intel does more. We should free page tables mapping vmemmap ? */
+	remove_pagetable(start, start + page_size);
 }
 #endif
 #endif
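For context, the commit message's claim that callers hand us an aligned range
can be pictured with a short sketch of the teardown path. This is a
hypothetical illustration, not kernel source: the loop and the name
sketch_vmemmap_free() are invented for clarity, while
radix__vmemmap_remove_mapping() and remove_pagetable() are from the patch
above.

/*
 * Hypothetical sketch of the vmemmap teardown path described in the
 * commit message. By the time the arch hook runs, the backing pages
 * have already been freed; the hook only has to unmap an aligned
 * [start, start + page_size) chunk, so none of the new WARN_ONCE()
 * failsafes in remove_pagetable() should ever fire.
 */
static void sketch_vmemmap_free(unsigned long start, unsigned long end,
                                unsigned long page_size)
{
        unsigned long addr;

        for (addr = start; addr < end; addr += page_size) {
                /* ...backing page freed here by generic code... */
                radix__vmemmap_remove_mapping(addr, page_size);
        }
}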