Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/traps.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index b417e2727050..4e00f9bc23ee 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -50,6 +50,7 @@
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/irq.h>
+#include <asm/uasm.h>
extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
@@ -1271,11 +1272,6 @@ unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
-/*
- * As a side effect of the way this is implemented we're limited
- * to interrupt handlers in the address range from
- * KSEG0 <= x < KSEG0 + 256mb on the Nevada. Oh well ...
- */
void __init *set_except_vector(int n, void *addr)
{
unsigned long handler = (unsigned long) addr;
@@ -1283,9 +1279,18 @@ void __init *set_except_vector(int n, void *addr)
exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) {
- *(u32 *)(ebase + 0x200) = 0x08000000 |
- (0x03ffffff & (handler >> 2));
- local_flush_icache_range(ebase + 0x200, ebase + 0x204);
+ unsigned long jump_mask = ~((1 << 28) - 1);
+ u32 *buf = (u32 *)(ebase + 0x200);
+ unsigned int k0 = 26;
+ if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
+ uasm_i_j(&buf, handler & ~jump_mask);
+ uasm_i_nop(&buf);
+ } else {
+ UASM_i_LA(&buf, k0, handler);
+ uasm_i_jr(&buf, k0);
+ uasm_i_nop(&buf);
+ }
+ local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
}
return (void *)old_handler;
}
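
For context, a minimal sketch (not part of the patch) of the reachability test the new code performs: a MIPS j instruction encodes only a 26-bit word index and keeps the upper bits of the PC, so it can reach the handler only if it lies in the same 256 MB-aligned region as ebase + 0x200. When that check fails, the patch instead loads the handler address into k0 ($26) with UASM_i_LA and jumps through jr, which removes the old KSEG0 + 256 MB restriction mentioned in the deleted comment. The addresses in the sketch below are hypothetical and used purely for illustration.

    /* Sketch of the 256 MB region check used above; plain userspace C,
     * not kernel code, with made-up addresses. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool j_can_reach(unsigned long from, unsigned long to)
    {
            /* The low 28 bits are replaced by the j target; the upper
             * bits select the 256 MB region and must already match. */
            unsigned long jump_mask = ~((1UL << 28) - 1);

            return (from & jump_mask) == (to & jump_mask);
    }

    int main(void)
    {
            printf("%d\n", j_can_reach(0x80000200UL, 0x801f0000UL)); /* 1: j can reach it */
            printf("%d\n", j_can_reach(0x80000200UL, 0xc0000000UL)); /* 0: fall back to jr k0 */
            return 0;
    }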