author     David Hildenbrand <david@redhat.com>          2019-03-05 15:47:28 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org> 2019-03-05 21:07:19 -0800
commit     d9fa9d951779eb8110879f796434876a58321ae9 (patch)
tree       4692f742d6bdfecf1ad36d3c0dc16ccfedfd7da3 /arch/arm64/mm
parent     aee494424414aa6f511bb837624557e9d3b84823 (diff)
arm64: kdump: no need to mark crashkernel pages manually PG_reserved
The crashkernel is reserved via memblock_reserve().  memblock_free_all()
will call free_low_memory_core_early(), which will go over all reserved
memblocks, marking the pages as PG_reserved.  So manually marking pages
as PG_reserved is not necessary; they are already in the desired state
(otherwise they would have been handed over to the buddy as free pages
and bad things would happen).

Link: http://lkml.kernel.org/r/20190114125903.24845-8-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Matthias Brugger <mbrugger@suse.com>
Reviewed-by: Bhupesh Sharma <bhsharma@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Dave Kleikamp <dave.kleikamp@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Florian Fainelli <f.fainelli@gmail.com>
Cc: Stefan Agner <stefan@agner.ch>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Greg Hackmann <ghackmann@android.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kristina Martsenko <kristina.martsenko@arm.com>
Cc: CHANDAN VN <chandan.vn@samsung.com>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
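For context, a minimal sketch of the generic behaviour the message relies on:
during memblock_free_all(), free_low_memory_core_early() walks every
memblock-reserved range (the crashkernel region among them) and marks each
page in it PG_reserved before the remaining memory is released to the buddy
allocator. The helper name mark_reserved_range() below is invented purely for
illustration; this is not verbatim kernel code, only the effect in simplified
form, assuming the usual kernel definitions of PFN_DOWN/PFN_UP, pfn_to_page()
and SetPageReserved().

    /*
     * Illustrative sketch only -- not the actual kernel function. It shows
     * what free_low_memory_core_early() already achieves for every
     * memblock-reserved range, which is why the arm64-specific loop
     * removed below was redundant.
     */
    static void __init mark_reserved_range(phys_addr_t start, phys_addr_t end)
    {
    	unsigned long pfn;

    	for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
    		struct page *page = pfn_to_page(pfn);

    		/* Same end state the removed arm64 loop produced by hand. */
    		SetPageReserved(page);
    	}
    }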
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/init.c  27
1 file changed, 0 insertions, 27 deletions
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 7205a9085b4d..c38976b70069 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -118,35 +118,10 @@ static void __init reserve_crashkernel(void)
 	crashk_res.start = crash_base;
 	crashk_res.end = crash_base + crash_size - 1;
 }
-
-static void __init kexec_reserve_crashkres_pages(void)
-{
-#ifdef CONFIG_HIBERNATION
-	phys_addr_t addr;
-	struct page *page;
-
-	if (!crashk_res.end)
-		return;
-
-	/*
-	 * To reduce the size of hibernation image, all the pages are
-	 * marked as Reserved initially.
-	 */
-	for (addr = crashk_res.start; addr < (crashk_res.end + 1);
-	     addr += PAGE_SIZE) {
-		page = phys_to_page(addr);
-		SetPageReserved(page);
-	}
-#endif
-}
 #else
 static void __init reserve_crashkernel(void)
 {
 }
-
-static void __init kexec_reserve_crashkres_pages(void)
-{
-}
 #endif /* CONFIG_KEXEC_CORE */
 
 #ifdef CONFIG_CRASH_DUMP
@@ -586,8 +561,6 @@ void __init mem_init(void)
 	/* this will put all unused low memory onto the freelists */
 	memblock_free_all();
 
-	kexec_reserve_crashkres_pages();
-
 	mem_init_print_info(NULL);
 
 	/*