author     Nicholas Piggin <npiggin@gmail.com>        2016-09-13 13:08:40 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>      2016-09-23 07:54:20 +1000
commit     a24553dd02dc6c7d2912af0b4b9c7f833c90e561 (patch)
tree       942dc65a35f5e6df44d9ffb5fe62a2b955f9eb20 /arch
parent     40e1b1cfb529891307b21f6e336d6375d4662cb7 (diff)
powerpc/pseries: Remove unnecessary syscall trampoline
When we originally added the ability to split the exception vectors from
the kernel (commit 1f6a93e4c35e ("powerpc: Make it possible to move the
interrupt handlers away from the kernel" 2008-09-15)), the LOAD_HANDLER() macro
used an addi instruction to compute the offset of the common handler
from the kernel base address.
Using addi meant the handler had to be within 32K of the kernel base
address, due to the addi instruction taking a signed immediate value.
That necessitated creating a trampoline for the system call handler,
because system_call_common (in entry_64.S) is not linked within 32K of
the kernel base address.
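
For context, here is a rough sketch of the addi-based scheme described
above, reconstructed from this description rather than taken from commit
1f6a93e4c35e itself; only the trampoline lines are quoted from the code
removed by this patch:

	/* Reconstructed sketch, not the original source: addi sign-extends
	 * its 16-bit immediate, so (label)-_stext had to stay below 32K. */
	#define LOAD_HANDLER(reg, label)				\
		ld	reg,PACAKBASE(r13);	/* high part of &label */ \
		addi	reg,reg,(label)-_stext;	/* fails if offset >= 32K */

	/* system_call_common sits outside that 32K window, so the vector
	 * branched through a nearby trampoline instead (removed below): */
		.align	7
	system_call_entry:
		b	system_call_common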
Later, in commit 61e2390ede3c ("powerpc: Make load_hander handle upto 64k
offset" 2012-11-15), we changed LOAD_HANDLER to take up to a 64K offset
by switching it to use ori.
Although system_call_common is not in head_64.S or exceptions-64s.S, it
is included in head-y, which causes it to be linked early in the kernel
text, so in practice it ends up below 64K. Additionally, if it can't be
placed below 64K, the link fails with a "relocation truncated to fit"
error.
So remove the trampoline.
Newer toolchains are able to work out that the ori in LOAD_HANDLER only
takes a 16-bit offset, and so they generate a 16-bit relocation. Older
toolchains (binutils 2.22 at least) are not so smart, so we have to add
the @l annotation to tell the assembler to generate a 16-bit relocation.
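
As an illustration, the patched macro expands roughly as follows on the
system call path (an expansion sketch using r10, matching the RFID
variant in the diff below):

	ld	r10,PACAKBASE(r13)	/* high part of the kernel base */
	ori	r10,r10,(system_call_common - _stext)@l	/* low 16 bits of the offset */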
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S     | 8
2 files changed, 3 insertions, 7 deletions
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index c2606715cfff..5032a80e8f6a 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -90,7 +90,7 @@
  */
 #define LOAD_HANDLER(reg, label)					\
 	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
-	ori	reg,reg,(label)-_stext;	/* virt addr of handler ... */
+	ori	reg,reg,((label)-_stext)@l;	/* virt addr of handler ... */
 
 /* Exception register prefixes */
 #define EXC_HV	H
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a91308d6abaf..23018162d749 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -41,7 +41,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
 
 #define SYSCALL_PSERIES_2_RFID					\
 	mfspr	r12,SPRN_SRR1 ;					\
-	LOAD_HANDLER(r10, system_call_entry) ;			\
+	LOAD_HANDLER(r10, system_call_common) ;			\
 	mtspr	SPRN_SRR0,r10 ;					\
 	ld	r10,PACAKMSR(r13) ;				\
 	mtspr	SPRN_SRR1,r10 ;					\
@@ -62,7 +62,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
  * is volatile across system calls.
  */
 #define SYSCALL_PSERIES_2_DIRECT				\
-	LOAD_HANDLER(r12, system_call_entry) ;			\
+	LOAD_HANDLER(r12, system_call_common) ;			\
 	mtctr	r12 ;						\
 	mfspr	r12,SPRN_SRR1 ;					\
 	li	r10,MSR_RI ;					\
@@ -902,10 +902,6 @@ hv_facility_unavailable_relon_trampoline:
 #endif
 
 	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
 
-	.align	7
-system_call_entry:
-	b	system_call_common
-
 ppc64_runlatch_on_trampoline:
 	b	__ppc64_runlatch_on