author     Ard Biesheuvel <ard.biesheuvel@linaro.org>  2015-09-15 14:50:22 +0200
committer  Ard Biesheuvel <ard.biesheuvel@linaro.org>  2015-12-13 19:18:28 +0100
commit     1bdb2d4ee05f2fdad4d8a82d7e0ce8d6d91ec4ac (patch)
tree       af0656e07c2a2501c8f0b8935dc008a09b7454ba /arch/arm/mm
parent     2937367b8a4b0d46ce3312cb997e4a240b02cf15 (diff)
ARM: split off core mapping logic from create_mapping
In order to be able to reuse the core mapping logic of create_mapping for
mapping the UEFI Runtime Services into a private set of page tables, split
it off from create_mapping() into a separate function __create_mapping
which we will wire up in a subsequent patch.

Tested-by: Ryan Harkin <ryan.harkin@linaro.org>
Reviewed-by: Matt Fleming <matt@codeblueprint.co.uk>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
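To illustrate the intent of the split, the sketch below shows how a caller
owning a private set of page tables could hand a map_desc to the new
__create_mapping() helper instead of going through create_mapping() and
init_mm. It is a minimal, hypothetical example: the efi_mm and
map_runtime_region names, the MT_DEVICE type and the exact wiring are
assumptions, not part of this patch.

	/* Hypothetical private page tables, e.g. for UEFI Runtime Services */
	static struct mm_struct efi_mm;

	static void __init map_runtime_region(phys_addr_t pa, unsigned long va,
					      unsigned long size)
	{
		struct map_desc md = {
			.virtual = va,
			.pfn     = __phys_to_pfn(pa),
			.length  = size,
			.type    = MT_DEVICE,	/* assumed memory type */
		};

		/* Map into the caller's own mm rather than init_mm */
		__create_mapping(&efi_mm, &md);
	}

create_mapping() keeps its sanity checks on the virtual address range and
then simply delegates to __create_mapping(&init_mm, md), as the diff below
shows.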
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/mmu.c | 56
1 file changed, 31 insertions(+), 25 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index de19f90221e2..3100de92148b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -818,7 +818,8 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
}
#ifndef CONFIG_ARM_LPAE
-static void __init create_36bit_mapping(struct map_desc *md,
+static void __init create_36bit_mapping(struct mm_struct *mm,
+ struct map_desc *md,
const struct mem_type *type)
{
unsigned long addr, length, end;
@@ -859,7 +860,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
*/
phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
- pgd = pgd_offset_k(addr);
+ pgd = pgd_offset(mm, addr);
end = addr + length;
do {
pud_t *pud = pud_offset(pgd, addr);
@@ -876,33 +877,13 @@ static void __init create_36bit_mapping(struct map_desc *md,
}
#endif /* !CONFIG_ARM_LPAE */
-/*
- * Create the page directory entries and any necessary
- * page tables for the mapping specified by `md'. We
- * are able to cope here with varying sizes and address
- * offsets, and we take full advantage of sections and
- * supersections.
- */
-static void __init create_mapping(struct map_desc *md)
+static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md)
{
unsigned long addr, length, end;
phys_addr_t phys;
const struct mem_type *type;
pgd_t *pgd;
- if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
- pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
- (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
- return;
- }
-
- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
- md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
- (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
- pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
- (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
- }
-
type = &mem_types[md->type];
#ifndef CONFIG_ARM_LPAE
@@ -910,7 +891,7 @@ static void __init create_mapping(struct map_desc *md)
* Catch 36-bit addresses
*/
if (md->pfn >= 0x100000) {
- create_36bit_mapping(md, type);
+ create_36bit_mapping(mm, md, type);
return;
}
#endif
@@ -925,7 +906,7 @@ static void __init create_mapping(struct map_desc *md)
return;
}
- pgd = pgd_offset_k(addr);
+ pgd = pgd_offset(mm, addr);
end = addr + length;
do {
unsigned long next = pgd_addr_end(addr, end);
@@ -938,6 +919,31 @@ static void __init create_mapping(struct map_desc *md)
}
/*
+ * Create the page directory entries and any necessary
+ * page tables for the mapping specified by `md'. We
+ * are able to cope here with varying sizes and address
+ * offsets, and we take full advantage of sections and
+ * supersections.
+ */
+static void __init create_mapping(struct map_desc *md)
+{
+ if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
+ pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+ return;
+ }
+
+ if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+ md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
+ (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+ pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+ }
+
+ __create_mapping(&init_mm, md);
+}
+
+/*
* Create the architecture specific mappings
*/
void __init iotable_init(struct map_desc *io_desc, int nr)