author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 12:31:53 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 12:31:53 -0700
commit     5167d09ffad5b16b574d35ce3047ed34caf1e837
tree       fc45dd9cbd578f5010e7b8208ecdfc6534547989
parent     8533ce72718871fb528d853391746f36243273af
parent     ea1719672f59eeb85829073b567495c4f472ac9f
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "Once again, Catalin's off on holiday and I'm looking after the arm64
  tree. Please can you pull the following arm64 updates for 3.17? Note
  that this branch also includes the new GICv3 driver (merged via a
  stable tag from Jason's irqchip tree), since there is a fix for older
  binutils on top.

  Changes include:

   - context tracking support (NO_HZ_FULL) which narrowly missed 3.16
   - vDSO layout rework following Andy's work on x86
   - TEXT_OFFSET fuzzing for bootloader testing
   - /proc/cpuinfo tidy-up
   - preliminary work to support 48-bit virtual addresses, but this is
     currently disabled until KVM has been ported to use it (the patches
     do, however, bring some nice clean-up)
   - boot-time CPU sanity checks (especially useful on heterogeneous
     systems)
   - support for syscall auditing
   - support for CC_STACKPROTECTOR
   - defconfig updates"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (55 commits)
  arm64: add newline to I-cache policy string
  Revert "arm64: dmi: Add SMBIOS/DMI support"
  arm64: fpsimd: fix a typo in fpsimd_save_partial_state ENDPROC
  arm64: don't call break hooks for BRK exceptions from EL0
  arm64: defconfig: enable devtmpfs mount option
  arm64: vdso: fix build error when switching from LE to BE
  arm64: defconfig: add virtio support for running as a kvm guest
  arm64: gicv3: Allow GICv3 compilation with older binutils
  arm64: fix soft lockup due to large tlb flush range
  arm64/crypto: fix makefile rule for aes-glue-%.o
  arm64: Do not invoke audit_syscall_* functions if !CONFIG_AUDIT_SYSCALL
  arm64: Fix barriers used for page table modifications
  arm64: Add support for 48-bit VA space with 64KB page configuration
  arm64: asm/pgtable.h pmd/pud definitions clean-up
  arm64: Determine the vmalloc/vmemmap space at build time based on VA_BITS
  arm64: Clean up the initial page table creation in head.S
  arm64: Remove asm/pgtable-*level-types.h files
  arm64: Remove asm/pgtable-*level-hwdef.h files
  arm64: Convert bool ARM64_x_LEVELS to int ARM64_PGTABLE_LEVELS
  arm64: mm: Implement 4 levels of translation tables
  ...
Diffstat (limited to 'arch/arm64/kernel/head.S')
-rw-r--r--  arch/arm64/kernel/head.S | 121
1 file changed, 76 insertions(+), 45 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a2c1195abb7f..144f10567f82 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -22,6 +22,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
@@ -35,37 +36,31 @@
#include <asm/page.h>
#include <asm/virt.h>
-/*
- * swapper_pg_dir is the virtual address of the initial page table. We place
- * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
- * 2 pages and is placed below swapper_pg_dir.
- */
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
-#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
-#error KERNEL_RAM_VADDR must start at 0xXXX80000
+#if (TEXT_OFFSET & 0xf) != 0
+#error TEXT_OFFSET must be at least 16B aligned
+#elif (PAGE_OFFSET & 0xfffff) != 0
+#error PAGE_OFFSET must be at least 2MB aligned
+#elif TEXT_OFFSET > 0xfffff
+#error TEXT_OFFSET must be less than 2MB
#endif
-#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
-
- .globl swapper_pg_dir
- .equ swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
-
- .globl idmap_pg_dir
- .equ idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
-
- .macro pgtbl, ttb0, ttb1, phys
- add \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
- sub \ttb0, \ttb1, #IDMAP_DIR_SIZE
+ .macro pgtbl, ttb0, ttb1, virt_to_phys
+ ldr \ttb1, =swapper_pg_dir
+ ldr \ttb0, =idmap_pg_dir
+ add \ttb1, \ttb1, \virt_to_phys
+ add \ttb0, \ttb0, \virt_to_phys
.endm
#ifdef CONFIG_ARM64_64K_PAGES
#define BLOCK_SHIFT PAGE_SHIFT
#define BLOCK_SIZE PAGE_SIZE
+#define TABLE_SHIFT PMD_SHIFT
#else
#define BLOCK_SHIFT SECTION_SHIFT
#define BLOCK_SIZE SECTION_SIZE
+#define TABLE_SHIFT PUD_SHIFT
#endif
#define KERNEL_START KERNEL_RAM_VADDR
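The reworked pgtbl macro above no longer derives the table addresses from the load address plus TEXT_OFFSET; it loads the link-time (virtual) addresses of idmap_pg_dir and swapper_pg_dir and adds the runtime virtual-to-physical offset carried in x28. A minimal C sketch of that arithmetic, with all names illustrative rather than taken from the kernel:

#include <stdint.h>

/*
 * ttb1 = __pa(swapper_pg_dir), ttb0 = __pa(idmap_pg_dir), where the
 * physical address is simply the virtual address plus the offset that
 * head.S keeps in x28 (PHYS_OFFSET - PAGE_OFFSET).
 */
struct ttbr_pair {
	uint64_t ttb0;	/* idmap_pg_dir, physical   */
	uint64_t ttb1;	/* swapper_pg_dir, physical */
};

static struct ttbr_pair pgtbl(uint64_t idmap_pg_dir_va,
			      uint64_t swapper_pg_dir_va,
			      uint64_t virt_to_phys_offset)
{
	struct ttbr_pair t = {
		.ttb0 = idmap_pg_dir_va + virt_to_phys_offset,
		.ttb1 = swapper_pg_dir_va + virt_to_phys_offset,
	};
	return t;
}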
@@ -120,9 +115,9 @@ efi_head:
b stext // branch to kernel start, magic
.long 0 // reserved
#endif
- .quad TEXT_OFFSET // Image load offset from start of RAM
- .quad 0 // reserved
- .quad 0 // reserved
+ .quad _kernel_offset_le // Image load offset from start of RAM, little-endian
+ .quad _kernel_size_le // Effective size of kernel image, little-endian
+ .quad _kernel_flags_le // Informative flags, little-endian
.quad 0 // reserved
.quad 0 // reserved
.quad 0 // reserved
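The three fields added above (_kernel_offset_le, _kernel_size_le, _kernel_flags_le) fill previously reserved slots of the arm64 Image header that bootloaders parse. A rough C view of that layout, assuming the field names and ordering described in Documentation/arm64/booting.txt (all values stored little-endian regardless of kernel endianness):

#include <stdint.h>

struct arm64_image_header {
	uint32_t code0;		/* executable code ("b stext")         */
	uint32_t code1;		/* executable code                     */
	uint64_t text_offset;	/* image load offset from start of RAM */
	uint64_t image_size;	/* effective size of the kernel image  */
	uint64_t flags;		/* informative flags, e.g. endianness  */
	uint64_t res2;		/* reserved                            */
	uint64_t res3;		/* reserved                            */
	uint64_t res4;		/* reserved                            */
	uint32_t magic;		/* 0x644d5241, "ARM\x64"               */
	uint32_t res5;		/* reserved                            */
};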
@@ -295,6 +290,23 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
msr cnthctl_el2, x0
msr cntvoff_el2, xzr // Clear virtual offset
+#ifdef CONFIG_ARM_GIC_V3
+ /* GICv3 system register access */
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #24, #4
+ cmp x0, #1
+ b.ne 3f
+
+ mrs_s x0, ICC_SRE_EL2
+ orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
+ orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1
+ msr_s ICC_SRE_EL2, x0
+ isb // Make sure SRE is now set
+ msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
+
+3:
+#endif
+
/* Populate ID registers. */
mrs x0, midr_el1
mrs x1, mpidr_el1
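The new block checks ID_AA64PFR0_EL1 bits [27:24], the GIC system-register interface field, and only enables the ICC_SRE_EL2 bits when that field reads 1. A hedged C sketch of the same check and bit-setting; the two constants mirror the ICC_SRE_EL2_SRE/ICC_SRE_EL2_ENABLE values used by the hunk rather than being quoted from a header:

#include <stdbool.h>
#include <stdint.h>

#define ICC_SRE_EL2_SRE		(1u << 0)	/* system-register interface enable */
#define ICC_SRE_EL2_ENABLE	(1u << 3)	/* allow lower-EL SRE access        */

/* ubfx x0, x0, #24, #4: extract the 4-bit GIC field from ID_AA64PFR0_EL1 */
static bool cpu_has_gicv3_sysregs(uint64_t id_aa64pfr0)
{
	return ((id_aa64pfr0 >> 24) & 0xf) == 1;
}

static uint32_t gicv3_enable_sre_el2(uint32_t icc_sre_el2)
{
	return icc_sre_el2 | ICC_SRE_EL2_SRE | ICC_SRE_EL2_ENABLE;
}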
@@ -413,7 +425,7 @@ ENTRY(secondary_startup)
mov x23, x0 // x23=current cpu_table
cbz x23, __error_p // invalid processor (x23=0)?
- pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1
+ pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1
ldr x12, [x23, #CPU_INFO_SETUP]
add x12, x12, x28 // __virt_to_phys
blr x12 // initialise processor
@@ -455,8 +467,13 @@ ENDPROC(__enable_mmu)
* x27 = *virtual* address to jump to upon completion
*
* other registers depend on the function called upon completion
+ *
+ * We align the entire function to the smallest power of two larger than it to
+ * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
+ * close to the end of a 512MB or 1GB block we might require an additional
+ * table to map the entire function.
*/
- .align 6
+ .align 4
__turn_mmu_on:
msr sctlr_el1, x0
isb
@@ -479,17 +496,38 @@ ENDPROC(__calc_phys_offset)
.quad PAGE_OFFSET
/*
- * Macro to populate the PGD for the corresponding block entry in the next
- * level (tbl) for the given virtual address.
+ * Macro to create a table entry to the next page.
+ *
+ * tbl: page table address
+ * virt: virtual address
+ * shift: #imm page table shift
+ * ptrs: #imm pointers per table page
+ *
+ * Preserves: virt
+ * Corrupts: tmp1, tmp2
+ * Returns: tbl -> next level table page address
+ */
+ .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
+ lsr \tmp1, \virt, #\shift
+ and \tmp1, \tmp1, #\ptrs - 1 // table index
+ add \tmp2, \tbl, #PAGE_SIZE
+ orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
+ str \tmp2, [\tbl, \tmp1, lsl #3]
+ add \tbl, \tbl, #PAGE_SIZE // next level table page
+ .endm
+
+/*
+ * Macro to populate the PGD (and possibly PUD) for the corresponding
+ * block entry in the next level (tbl) for the given virtual address.
*
- * Preserves: pgd, tbl, virt
+ * Preserves: tbl, next, virt
* Corrupts: tmp1, tmp2
*/
- .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2
- lsr \tmp1, \virt, #PGDIR_SHIFT
- and \tmp1, \tmp1, #PTRS_PER_PGD - 1 // PGD index
- orr \tmp2, \tbl, #3 // PGD entry table type
- str \tmp2, [\pgd, \tmp1, lsl #3]
+ .macro create_pgd_entry, tbl, virt, tmp1, tmp2
+ create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
+#if SWAPPER_PGTABLE_LEVELS == 3
+ create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
+#endif
.endm
/*
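In C terms, create_table_entry computes the index for virt at a given level, points the current table's slot at the page-sized table that immediately follows it, and moves on to that next table; create_pgd_entry invokes it once for the PGD and, when SWAPPER_PGTABLE_LEVELS is 3, once more for the intermediate level. A minimal sketch of that logic (a host-side illustration, not the kernel macro itself):

#include <stdint.h>

#define PAGE_SIZE	4096u
#define PMD_TYPE_TABLE	3u	/* "next level is a table" descriptor bits */

/*
 * tbl[index] is set to the address of the next table page (assumed to
 * sit PAGE_SIZE bytes after the current one, as in head.S) plus the
 * table-type bits; the updated table pointer is returned.
 */
static uint64_t *create_table_entry(uint64_t *tbl, uint64_t virt,
				    unsigned int shift, unsigned int ptrs)
{
	uint64_t index = (virt >> shift) & (ptrs - 1);	/* table index           */
	uint64_t next  = (uint64_t)tbl + PAGE_SIZE;	/* next level table page */

	tbl[index] = next | PMD_TYPE_TABLE;
	return (uint64_t *)next;
}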
@@ -522,7 +560,7 @@ ENDPROC(__calc_phys_offset)
* - pgd entry for fixed mappings (TTBR1)
*/
__create_page_tables:
- pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
+ pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
mov x27, lr
/*
@@ -550,10 +588,10 @@ __create_page_tables:
/*
* Create the identity mapping.
*/
- add x0, x25, #PAGE_SIZE // section table address
+ mov x0, x25 // idmap_pg_dir
ldr x3, =KERNEL_START
add x3, x3, x28 // __pa(KERNEL_START)
- create_pgd_entry x25, x0, x3, x5, x6
+ create_pgd_entry x0, x3, x5, x6
ldr x6, =KERNEL_END
mov x5, x3 // __pa(KERNEL_START)
add x6, x6, x28 // __pa(KERNEL_END)
@@ -562,9 +600,9 @@ __create_page_tables:
/*
* Map the kernel image (starting with PHYS_OFFSET).
*/
- add x0, x26, #PAGE_SIZE // section table address
+ mov x0, x26 // swapper_pg_dir
mov x5, #PAGE_OFFSET
- create_pgd_entry x26, x0, x5, x3, x6
+ create_pgd_entry x0, x5, x3, x6
ldr x6, =KERNEL_END
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6
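create_block_map itself is not part of this diff, but the two call sites above set up the boot mappings: the previous hunk maps __pa(KERNEL_START)..__pa(KERNEL_END) with virtual equal to physical (the idmap), while this hunk maps the kernel image at PAGE_OFFSET onto its physical load address. A hedged C approximation of what such a block-map fill does; the names and the 2MB block size are illustrative assumptions:

#include <stdint.h>

#define BLOCK_SHIFT	21			/* 2MB blocks with 4KB pages (assumed) */
#define BLOCK_SIZE	(1ULL << BLOCK_SHIFT)
#define PTRS_PER_PTE	512u

/* Fill block entries so [va_start, va_end] maps onto the range starting at phys. */
static void map_block_range(uint64_t *tbl, uint64_t flags, uint64_t phys,
			    uint64_t va_start, uint64_t va_end)
{
	uint64_t idx   = (va_start >> BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
	uint64_t end   = (va_end   >> BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
	uint64_t entry = ((phys >> BLOCK_SHIFT) << BLOCK_SHIFT) | flags;

	do {
		tbl[idx++] = entry;	/* one block descriptor per block-sized chunk */
		entry += BLOCK_SIZE;
	} while (idx <= end);
}

For the identity map the caller passes phys equal to va_start (x5 is copied from x3), so the translation is a no-op; for swapper_pg_dir it passes va_start = PAGE_OFFSET and phys = the physical offset held in x24.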
@@ -586,13 +624,6 @@ __create_page_tables:
create_block_map x0, x7, x3, x5, x6
1:
/*
- * Create the pgd entry for the fixed mappings.
- */
- ldr x5, =FIXADDR_TOP // Fixed mapping virtual address
- add x0, x26, #2 * PAGE_SIZE // section table address
- create_pgd_entry x26, x0, x5, x6, x7
-
- /*
* Since the page tables have been populated with non-cacheable
* accesses (MMU disabled), invalidate the idmap and swapper page
* tables again to remove any speculatively loaded cache lines.
@@ -611,7 +642,7 @@ ENDPROC(__create_page_tables)
__switch_data:
.quad __mmap_switched
.quad __bss_start // x6
- .quad _end // x7
+ .quad __bss_stop // x7
.quad processor_id // x4
.quad __fdt_pointer // x5
.quad memstart_addr // x6
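The x7 slot switched above from _end to __bss_stop feeds the zeroing loop in __mmap_switched (not shown in this diff), which clears the region between the addresses loaded from the x6 and x7 slots. A trivial C sketch of that step, under the assumption that those two slots bound the BSS:

static void clear_bss(char *bss_start, char *bss_stop)
{
	/* zero [bss_start, bss_stop), as __mmap_switched does with x6/x7 */
	while (bss_start < bss_stop)
		*bss_start++ = 0;
}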