author     Matt Fleming <matt.fleming@intel.com>    2014-03-13 14:58:42 +0000
committer  Matt Fleming <matt.fleming@intel.com>    2014-03-17 21:54:17 +0000
commit     e10848a26a962e404ac00c897dfe54f14290806d (patch)
tree       3be0d9d69d2471749b22743f02d3dd98b59a9aed /arch/x86/platform/efi
parent     617b3c37da78cb89c63ed880b2405afc7490567b (diff)
x86/efi: Preserve segment registers in mixed mode
I was triggering a #GP(0) from userland when running with
CONFIG_EFI_MIXED and CONFIG_IA32_EMULATION, from what looked like
register corruption. It turned out that the mixed mode code was trashing the
contents of %ds, %es and %ss in __efi64_thunk().
Save and restore the contents of these segment registers across the call
to __efi64_thunk() so that we don't corrupt the CPU context.
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
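
For readers less familiar with the mixed mode thunk, the fix boils down to the classic save/restore pattern around code that may clobber state. Below is a minimal, self-contained sketch of that pattern in GNU as (AT&T) syntax; the call target some_segment_clobbering_call is a hypothetical stand-in, not the kernel's actual code path:

	/*
	 * Preserve %ds, %es and %ss around a call that may clobber them.
	 * In 64-bit mode these segment registers cannot be pushed directly,
	 * so each one is staged through a general-purpose register first.
	 */
	movl	%ds, %eax			/* copy %ds into a GPR */
	push	%rax				/* and save it on the stack */
	movl	%es, %eax
	push	%rax
	movl	%ss, %eax
	push	%rax

	call	some_segment_clobbering_call	/* hypothetical routine that trashes segments */

	pop	%rbx				/* restore in reverse order */
	movl	%ebx, %ss
	pop	%rbx
	movl	%ebx, %es
	pop	%rbx
	movl	%ebx, %ds

The patch below applies exactly this pattern inside __efi64_thunk(), with the switch to the 32-bit firmware code sitting between the saves and the restores.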
Diffstat (limited to 'arch/x86/platform/efi')
-rw-r--r--   arch/x86/platform/efi/efi_stub_64.S | 25
1 file changed, 17 insertions, 8 deletions
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 65b787a9fc4e..e0984ef0374b 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -176,6 +176,13 @@ ENDPROC(efi_call6)
  * This function must be invoked with a 1:1 mapped stack.
  */
 ENTRY(__efi64_thunk)
+	movl	%ds, %eax
+	push	%rax
+	movl	%es, %eax
+	push	%rax
+	movl	%ss, %eax
+	push	%rax
+
 	subq	$32, %rsp
 	movl	%esi, 0x0(%rsp)
 	movl	%edx, 0x4(%rsp)
@@ -191,7 +198,7 @@ ENTRY(__efi64_thunk)
 	movq	%rbx, func_rt_ptr(%rip)

 	/* Switch to gdt with 32-bit segments */
-	movl	40(%rsp), %eax
+	movl	64(%rsp), %eax
 	lgdt	(%rax)

 	leaq	efi_enter32(%rip), %rax
@@ -203,6 +210,13 @@ ENTRY(__efi64_thunk)

 	lgdt	save_gdt(%rip)

+	pop	%rbx
+	movl	%ebx, %ss
+	pop	%rbx
+	movl	%ebx, %es
+	pop	%rbx
+	movl	%ebx, %ds
+
 	/*
 	 * Convert 32-bit status code into 64-bit.
 	 */
@@ -218,11 +232,6 @@ ENDPROC(__efi64_thunk)
 ENDPROC(__efi64_thunk)

 ENTRY(efi_exit32)
-	xorq	%rax, %rax
-	movl	%eax, %ds
-	movl	%eax, %es
-	movl	%eax, %ss
-
 	movq	func_rt_ptr(%rip), %rax
 	push	%rax
 	mov	%rdi, %rax
@@ -267,7 +276,7 @@ ENTRY(efi_enter32)
 	 */
 	cli

-	movl	44(%esp), %eax
+	movl	68(%esp), %eax
 	movl	%eax, 2(%eax)
 	lgdtl	(%eax)

@@ -286,7 +295,7 @@ ENTRY(efi_enter32)
 	xorl	%eax, %eax
 	lldt	%ax

-	movl	48(%esp), %eax
+	movl	72(%esp), %eax
 	pushl	$__KERNEL_CS
 	pushl	%eax
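
A note on the constant changes in the hunks above: the three extra pushes shift everything that was already on the stack by 24 bytes, which is where the new offsets come from. A rough sketch of the arithmetic, stated as comments rather than authoritative layout documentation:

	/*
	 * Each of the three saved segment values occupies 8 bytes:
	 *     3 * 8 = 24 extra bytes on the stack
	 * so every offset that previously reached past this region
	 * moves down by 24:
	 *     40 + 24 = 64   (__efi64_thunk, %rsp-relative)
	 *     44 + 24 = 68   (efi_enter32, %esp-relative)
	 *     48 + 24 = 72   (efi_enter32, %esp-relative)
	 * matching the 40->64, 44->68 and 48->72 changes in the diff.
	 */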