Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile          |   2
-rw-r--r--  arch/arm/kernel/calls.S           |   2
-rw-r--r--  arch/arm/kernel/debug.S           |  27
-rw-r--r--  arch/arm/kernel/dma-isa.c         |  67
-rw-r--r--  arch/arm/kernel/dma.c             | 119
-rw-r--r--  arch/arm/kernel/entry-armv.S      |  19
-rw-r--r--  arch/arm/kernel/entry-common.S    |   8
-rw-r--r--  arch/arm/kernel/fiq.c             |   4
-rw-r--r--  arch/arm/kernel/irq.c             |  20
-rw-r--r--  arch/arm/kernel/module.c          |  88
-rw-r--r--  arch/arm/kernel/process.c         |  33
-rw-r--r--  arch/arm/kernel/ptrace.c          |  58
-rw-r--r--  arch/arm/kernel/setup.c           |   5
-rw-r--r--  arch/arm/kernel/smp.c             |   2
-rw-r--r--  arch/arm/kernel/stacktrace.c      |  88
-rw-r--r--  arch/arm/kernel/stacktrace.h      |   9
-rw-r--r--  arch/arm/kernel/sys_oabi-compat.c |  20
-rw-r--r--  arch/arm/kernel/time.c            |  21
-rw-r--r--  arch/arm/kernel/traps.c           |  44
-rw-r--r--  arch/arm/kernel/unwind.c          | 434
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S     |  21
21 files changed, 908 insertions(+), 183 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 4305345987d3..11a5197a221f 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,12 +29,14 @@ obj-$(CONFIG_ATAGS_PROC) += atags.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_ARM_UNWIND) += unwind.o
obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
+obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 9ca8d13f05f7..1680e9e9c831 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -370,6 +370,8 @@
CALL(sys_dup3)
CALL(sys_pipe2)
/* 360 */ CALL(sys_inotify_init1)
+ CALL(sys_preadv)
+ CALL(sys_pwritev)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index f53c58290543..b121b6053cce 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -49,6 +49,33 @@
1002:
.endm
+#elif defined(CONFIG_CPU_XSCALE)
+
+ .macro addruart, rx
+ .endm
+
+ .macro senduart, rd, rx
+ mcr p14, 0, \rd, c8, c0, 0
+ .endm
+
+ .macro busyuart, rd, rx
+1001:
+ mrc p14, 0, \rx, c14, c0, 0
+ tst \rx, #0x10000000
+ beq 1001b
+ .endm
+
+ .macro waituart, rd, rx
+ mov \rd, #0x10000000
+1001:
+ subs \rd, \rd, #1
+ bmi 1002f
+ mrc p14, 0, \rx, c14, c0, 0
+ tst \rx, #0x10000000
+ bne 1001b
+1002:
+ .endm
+
#else
.macro addruart, rx
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
index 4a3a50495c60..0e88e46fc732 100644
--- a/arch/arm/kernel/dma-isa.c
+++ b/arch/arm/kernel/dma-isa.c
@@ -24,11 +24,6 @@
#include <asm/dma.h>
#include <asm/mach/dma.h>
-#define ISA_DMA_MODE_READ 0x44
-#define ISA_DMA_MODE_WRITE 0x48
-#define ISA_DMA_MODE_CASCADE 0xc0
-#define ISA_DMA_AUTOINIT 0x10
-
#define ISA_DMA_MASK 0
#define ISA_DMA_MODE 1
#define ISA_DMA_CLRFF 2
@@ -49,38 +44,35 @@ static unsigned int isa_dma_port[8][7] = {
{ 0xd4, 0xd6, 0xd8, 0x48a, 0x08a, 0xcc, 0xce }
};
-static int isa_get_dma_residue(dmach_t channel, dma_t *dma)
+static int isa_get_dma_residue(unsigned int chan, dma_t *dma)
{
- unsigned int io_port = isa_dma_port[channel][ISA_DMA_COUNT];
+ unsigned int io_port = isa_dma_port[chan][ISA_DMA_COUNT];
int count;
count = 1 + inb(io_port);
count |= inb(io_port) << 8;
- return channel < 4 ? count : (count << 1);
+ return chan < 4 ? count : (count << 1);
}
-static void isa_enable_dma(dmach_t channel, dma_t *dma)
+static void isa_enable_dma(unsigned int chan, dma_t *dma)
{
if (dma->invalid) {
unsigned long address, length;
unsigned int mode;
enum dma_data_direction direction;
- mode = channel & 3;
+ mode = (chan & 3) | dma->dma_mode;
switch (dma->dma_mode & DMA_MODE_MASK) {
case DMA_MODE_READ:
- mode |= ISA_DMA_MODE_READ;
direction = DMA_FROM_DEVICE;
break;
case DMA_MODE_WRITE:
- mode |= ISA_DMA_MODE_WRITE;
direction = DMA_TO_DEVICE;
break;
case DMA_MODE_CASCADE:
- mode |= ISA_DMA_MODE_CASCADE;
direction = DMA_BIDIRECTIONAL;
break;
@@ -105,34 +97,31 @@ static void isa_enable_dma(dmach_t channel, dma_t *dma)
address = dma->buf.dma_address;
length = dma->buf.length - 1;
- outb(address >> 16, isa_dma_port[channel][ISA_DMA_PGLO]);
- outb(address >> 24, isa_dma_port[channel][ISA_DMA_PGHI]);
+ outb(address >> 16, isa_dma_port[chan][ISA_DMA_PGLO]);
+ outb(address >> 24, isa_dma_port[chan][ISA_DMA_PGHI]);
- if (channel >= 4) {
+ if (chan >= 4) {
address >>= 1;
length >>= 1;
}
- outb(0, isa_dma_port[channel][ISA_DMA_CLRFF]);
-
- outb(address, isa_dma_port[channel][ISA_DMA_ADDR]);
- outb(address >> 8, isa_dma_port[channel][ISA_DMA_ADDR]);
+ outb(0, isa_dma_port[chan][ISA_DMA_CLRFF]);
- outb(length, isa_dma_port[channel][ISA_DMA_COUNT]);
- outb(length >> 8, isa_dma_port[channel][ISA_DMA_COUNT]);
+ outb(address, isa_dma_port[chan][ISA_DMA_ADDR]);
+ outb(address >> 8, isa_dma_port[chan][ISA_DMA_ADDR]);
- if (dma->dma_mode & DMA_AUTOINIT)
- mode |= ISA_DMA_AUTOINIT;
+ outb(length, isa_dma_port[chan][ISA_DMA_COUNT]);
+ outb(length >> 8, isa_dma_port[chan][ISA_DMA_COUNT]);
- outb(mode, isa_dma_port[channel][ISA_DMA_MODE]);
+ outb(mode, isa_dma_port[chan][ISA_DMA_MODE]);
dma->invalid = 0;
}
- outb(channel & 3, isa_dma_port[channel][ISA_DMA_MASK]);
+ outb(chan & 3, isa_dma_port[chan][ISA_DMA_MASK]);
}
-static void isa_disable_dma(dmach_t channel, dma_t *dma)
+static void isa_disable_dma(unsigned int chan, dma_t *dma)
{
- outb(channel | 4, isa_dma_port[channel][ISA_DMA_MASK]);
+ outb(chan | 4, isa_dma_port[chan][ISA_DMA_MASK]);
}
static struct dma_ops isa_dma_ops = {
@@ -160,7 +149,12 @@ static struct resource dma_resources[] = { {
.end = 0x048f
} };
-void __init isa_init_dma(dma_t *dma)
+static dma_t isa_dma[8];
+
+/*
+ * ISA DMA always starts at channel 0
+ */
+void __init isa_init_dma(void)
{
/*
* Try to autodetect presence of an ISA DMA controller.
@@ -178,11 +172,11 @@ void __init isa_init_dma(dma_t *dma)
outb(0xaa, 0x00);
if (inb(0) == 0x55 && inb(0) == 0xaa) {
- int channel, i;
+ unsigned int chan, i;
- for (channel = 0; channel < 8; channel++) {
- dma[channel].d_ops = &isa_dma_ops;
- isa_disable_dma(channel, NULL);
+ for (chan = 0; chan < 8; chan++) {
+ isa_dma[chan].d_ops = &isa_dma_ops;
+ isa_disable_dma(chan, NULL);
}
outb(0x40, 0x0b);
@@ -217,5 +211,12 @@ void __init isa_init_dma(dma_t *dma)
for (i = 0; i < ARRAY_SIZE(dma_resources); i++)
request_resource(&ioport_resource, dma_resources + i);
+
+ for (chan = 0; chan < 8; chan++) {
+ int ret = isa_dma_add(chan, &isa_dma[chan]);
+ if (ret)
+ printk(KERN_ERR "ISADMA%u: unable to register: %d\n",
+ chan, ret);
+ }
}
}
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index d006085ed7e7..7d5b9fb01e71 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
+#include <linux/scatterlist.h>
#include <asm/dma.h>
@@ -23,19 +24,40 @@
DEFINE_SPINLOCK(dma_spin_lock);
EXPORT_SYMBOL(dma_spin_lock);
-static dma_t dma_chan[MAX_DMA_CHANNELS];
+static dma_t *dma_chan[MAX_DMA_CHANNELS];
+
+static inline dma_t *dma_channel(unsigned int chan)
+{
+ if (chan >= MAX_DMA_CHANNELS)
+ return NULL;
+
+ return dma_chan[chan];
+}
+
+int __init isa_dma_add(unsigned int chan, dma_t *dma)
+{
+ if (!dma->d_ops)
+ return -EINVAL;
+
+ sg_init_table(&dma->buf, 1);
+
+ if (dma_chan[chan])
+ return -EBUSY;
+ dma_chan[chan] = dma;
+ return 0;
+}
/*
* Request DMA channel
*
* On certain platforms, we have to allocate an interrupt as well...
*/
-int request_dma(dmach_t channel, const char *device_id)
+int request_dma(unsigned int chan, const char *device_id)
{
- dma_t *dma = dma_chan + channel;
+ dma_t *dma = dma_channel(chan);
int ret;
- if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
+ if (!dma)
goto bad_dma;
if (xchg(&dma->lock, 1) != 0)
@@ -47,7 +69,7 @@ int request_dma(dmach_t channel, const char *device_id)
ret = 0;
if (dma->d_ops->request)
- ret = dma->d_ops->request(channel, dma);
+ ret = dma->d_ops->request(chan, dma);
if (ret)
xchg(&dma->lock, 0);
@@ -55,7 +77,7 @@ int request_dma(dmach_t channel, const char *device_id)
return ret;
bad_dma:
- printk(KERN_ERR "dma: trying to allocate DMA%d\n", channel);
+ printk(KERN_ERR "dma: trying to allocate DMA%d\n", chan);
return -EINVAL;
busy:
@@ -68,42 +90,42 @@ EXPORT_SYMBOL(request_dma);
*
* On certain platforms, we have to free interrupt as well...
*/
-void free_dma(dmach_t channel)
+void free_dma(unsigned int chan)
{
- dma_t *dma = dma_chan + channel;
+ dma_t *dma = dma_channel(chan);
- if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
+ if (!dma)
goto bad_dma;
if (dma->active) {
- printk(KERN_ERR "dma%d: freeing active DMA\n", channel);
- dma->d_ops->disable(channel, dma);
+ printk(KERN_ERR "dma%d: freeing active DMA\n", chan);
+ dma->d_ops->disable(chan, dma);
dma->active = 0;
}
if (xchg(&dma->lock, 0) != 0) {
if (dma->d_ops->free)
- dma->d_ops->free(channel, dma);
+ dma->d_ops->free(chan, dma);
return;
}
- printk(KERN_ERR "dma%d: trying to free free DMA\n", channel);
+ printk(KERN_ERR "dma%d: trying to free free DMA\n", chan);
return;
bad_dma:
- printk(KERN_ERR "dma: trying to free DMA%d\n", channel);
+ printk(KERN_ERR "dma: trying to free DMA%d\n", chan);
}
EXPORT_SYMBOL(free_dma);
/* Set DMA Scatter-Gather list
*/
-void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg)
+void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg)
{
- dma_t *dma = dma_chan + channel;
+ dma_t *dma = dma_channel(chan);
if (dma->active)
printk(KERN_ERR "dma%d: altering DMA SG while "
- "DMA active\n", channel);
+ "DMA active\n", chan);
dma->sg = sg;
dma->sgcount = nr_sg;
@@ -115,13 +137,13 @@ EXPORT_SYMBOL(set_dma_sg);
*
* Copy address to the structure, and set the invalid bit
*/
-void __set_dma_addr (dmach_t channel, void *addr)
+void __set_dma_addr (unsigned int chan, void *addr)
{
- dma_t *dma = dma_chan + channel;
+ dma_t *dma = dma_channel(chan);
if (dma->active)
printk(KERN_ERR "dma%d: altering DMA address while "
- "DMA active\n", channel);
+ "DMA active\n", chan);
dma->sg = NULL;
dma->addr = addr;
@@ -133,13 +155,13 @@ EXPORT_SYMBOL(__set_dma_addr);
*
* Copy address to the structure, and set the invalid bit
*/
-void set_dma_count (dmach_t channel, unsigned long count)
+void set_dma_count (unsigned int chan, unsigned long count)
{
- dma_t *dma = dma_chan + channel;
+ dma_t *dma = dma_channel(chan);
if (dma->active)
printk(KERN_ERR "dma%d: altering DMA count while "
- "DMA active\n", channel);
+ "DMA active\n", chan);
dma->sg = NULL;
dma->count = count;
@@ -149,13 +171,13 @@ EXPORT_SYMBOL(set_dma_count);
/* Set DMA direction mode
*/
-void set_dma_mode (dmach_t channel, dmamode_t mode)
+void set_dma_mode (unsigned int chan, unsigned int mode)
{
- dma_t *dma = dma_chan + channel;
+ dma_t *dma = dma_channel(chan);
if (dma->active)
printk(KERN_ERR "dma%d: altering DMA mode while "
- "DMA active\n", channel);
+ "DMA active\n", chan);
dma->dma_mode = mode;
dma->invalid = 1;
@@ -164,42 +186,42 @@ EXPORT_SYMBOL(set_dma_mode);
/* Enable DMA channel
*/
-void enable_dma (dmach_t channel)
+void enable_dma (unsigned int chan)
{
- dma_t *dma = dma_chan + channel;
+ dma_t *dma = dma_channel(chan);
if (!dma->lock)
goto free_dma;
if (dma->active == 0) {
dma->active = 1;
- dma->d_ops->enable(channel, dma);
+ dma->d_ops->enable(chan, dma);
}
return;
free_dma:
- printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel);
+ printk(KERN_ERR "dma%d: trying to enable free DMA\n", chan);
BUG();
}
EXPORT_SYMBOL(enable_dma);
/* Disable DMA channel
*/
-void disable_dma (dmach_t channel)
+void disable_dma (unsigned int chan)
{
- dma_t *dma = dma_chan + channel;
+ dma_t *dma = dma_channel(chan);
if (!dma->lock)
goto free_dma;
if (dma->active == 1) {
dma->active = 0;
- dma->d_ops->disable(channel, dma);
+ dma->d_ops->disable(chan, dma);
}
return;
free_dma:
- printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel);
+ printk(KERN_ERR "dma%d: trying to disable free DMA\n", chan);
BUG();
}
EXPORT_SYMBOL(disable_dma);
@@ -207,45 +229,38 @@ EXPORT_SYMBOL(disable_dma);
/*
* Is the specified DMA channel active?
*/
-int dma_channel_active(dmach_t channel)
+int dma_channel_active(unsigned int chan)
{
- return dma_chan[channel].active;
+ dma_t *dma = dma_channel(chan);
+ return dma->active;
}
EXPORT_SYMBOL(dma_channel_active);
-void set_dma_page(dmach_t channel, char pagenr)
+void set_dma_page(unsigned int chan, char pagenr)
{
- printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel);
+ printk(KERN_ERR "dma%d: trying to set_dma_page\n", chan);
}
EXPORT_SYMBOL(set_dma_page);
-void set_dma_speed(dmach_t channel, int cycle_ns)
+void set_dma_speed(unsigned int chan, int cycle_ns)
{
- dma_t *dma = dma_chan + channel;
+ dma_t *dma = dma_channel(chan);
int ret = 0;
if (dma->d_ops->setspeed)
- ret = dma->d_ops->setspeed(channel, dma, cycle_ns);
+ ret = dma->d_ops->setspeed(chan, dma, cycle_ns);
dma->speed = ret;
}
EXPORT_SYMBOL(set_dma_speed);
-int get_dma_residue(dmach_t channel)
+int get_dma_residue(unsigned int chan)
{
- dma_t *dma = dma_chan + channel;
+ dma_t *dma = dma_channel(chan);
int ret = 0;
if (dma->d_ops->residue)
- ret = dma->d_ops->residue(channel, dma);
+ ret = dma->d_ops->residue(chan, dma);
return ret;
}
EXPORT_SYMBOL(get_dma_residue);
-
-static int __init init_dma(void)
-{
- arch_dma_init(dma_chan);
- return 0;
-}
-
-core_initcall(init_dma);
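
The dma.c rework above replaces the static dma_chan[] array with explicit
registration (isa_dma_add) and a bounds-checked dma_channel() lookup, while
the driver-facing entry points keep their old behaviour and simply take an
unsigned int channel number now. As a rough sketch (not part of the patch;
the channel number, function name and read-from-device scenario are invented
for illustration), a driver on a platform with ISA-style DMA would still
program a channel like this:

#include <linux/errno.h>
#include <asm/dma.h>

/* Illustrative only: drive ISA DMA channel 2 through the API kept above. */
static int example_start_dma_read(void *buf, unsigned long len)
{
	unsigned long flags;
	int err;

	err = request_dma(2, "example");	/* claim the channel */
	if (err)
		return err;

	flags = claim_dma_lock();		/* takes dma_spin_lock */
	disable_dma(2);
	set_dma_mode(2, DMA_MODE_READ);		/* device -> memory */
	__set_dma_addr(2, buf);			/* kernel virtual address of the buffer */
	set_dma_count(2, len);
	enable_dma(2);				/* d_ops->enable() programs the controller */
	release_dma_lock(flags);

	return 0;
}
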
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 85040cfeb5e5..d662a2f1fd85 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -20,6 +20,7 @@
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
+#include <asm/unwind.h>
#include "entry-header.S"
@@ -123,6 +124,8 @@ ENDPROC(__und_invalid)
#endif
.macro svc_entry, stack_hole=0
+ UNWIND(.fnstart )
+ UNWIND(.save {r0 - pc} )
sub sp, sp, #(S_FRAME_SIZE + \stack_hole)
SPFIX( tst sp, #4 )
SPFIX( bicne sp, sp, #4 )
@@ -196,6 +199,7 @@ __dabt_svc:
ldr r0, [sp, #S_PSR]
msr spsr_cxsf, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ UNWIND(.fnend )
ENDPROC(__dabt_svc)
.align 5
@@ -228,6 +232,7 @@ __irq_svc:
bleq trace_hardirqs_on
#endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ UNWIND(.fnend )
ENDPROC(__irq_svc)
.ltorg
@@ -278,6 +283,7 @@ __und_svc:
ldr lr, [sp, #S_PSR] @ Get SVC cpsr
msr spsr_cxsf, lr
ldmia sp, {r0 - pc}^ @ Restore SVC registers
+ UNWIND(.fnend )
ENDPROC(__und_svc)
.align 5
@@ -320,6 +326,7 @@ __pabt_svc:
ldr r0, [sp, #S_PSR]
msr spsr_cxsf, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ UNWIND(.fnend )
ENDPROC(__pabt_svc)
.align 5
@@ -343,6 +350,8 @@ ENDPROC(__pabt_svc)
#endif
.macro usr_entry
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #S_FRAME_SIZE
stmib sp, {r1 - r12}
@@ -420,6 +429,7 @@ __dabt_usr:
mov r2, sp
adr lr, ret_from_exception
b do_DataAbort
+ UNWIND(.fnend )
ENDPROC(__dabt_usr)
.align 5
@@ -450,6 +460,7 @@ __irq_usr:
mov why, #0
b ret_to_user
+ UNWIND(.fnend )
ENDPROC(__irq_usr)
.ltorg
@@ -484,6 +495,7 @@ __und_usr:
#else
b __und_usr_unknown
#endif
+ UNWIND(.fnend )
ENDPROC(__und_usr)
@
@@ -671,14 +683,18 @@ __pabt_usr:
enable_irq @ Enable interrupts
mov r1, sp @ regs
bl do_PrefetchAbort @ call abort handler
+ UNWIND(.fnend )
/* fall through */
/*
* This is the return code to user mode for abort handlers
*/
ENTRY(ret_from_exception)
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind )
get_thread_info tsk
mov why, #0
b ret_to_user
+ UNWIND(.fnend )
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)
@@ -688,6 +704,8 @@ ENDPROC(ret_from_exception)
* previous and next are guaranteed not to be the same.
*/
ENTRY(__switch_to)
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind )
add ip, r1, #TI_CPU_SAVE
ldr r3, [r2, #TI_TP_VALUE]
stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
@@ -717,6 +735,7 @@ ENTRY(__switch_to)
bl atomic_notifier_call_chain
mov r0, r5
ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+ UNWIND(.fnend )
ENDPROC(__switch_to)
__INIT
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 49a6ba926c2b..b55cb0331809 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -11,6 +11,7 @@
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
+#include <asm/unwind.h>
#include "entry-header.S"
@@ -22,6 +23,8 @@
* stack.
*/
ret_fast_syscall:
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind )
disable_irq @ disable interrupts
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
@@ -38,6 +41,7 @@ ret_fast_syscall:
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
movs pc, lr @ return & move spsr_svc into cpsr
+ UNWIND(.fnend )
/*
* Ok, we need to do extra processing, enter the slow path.
@@ -111,6 +115,7 @@ ENTRY(mcount)
.globl mcount_call
mcount_call:
bl ftrace_stub
+ ldr lr, [fp, #-4] @ restore lr
ldmia sp!, {r0-r3, pc}
ENTRY(ftrace_caller)
@@ -122,6 +127,7 @@ ENTRY(ftrace_caller)
.globl ftrace_call
ftrace_call:
bl ftrace_stub
+ ldr lr, [fp, #-4] @ restore lr
ldmia sp!, {r0-r3, pc}
#else
@@ -133,6 +139,7 @@ ENTRY(mcount)
adr r0, ftrace_stub
cmp r0, r2
bne trace
+ ldr lr, [fp, #-4] @ restore lr
ldmia sp!, {r0-r3, pc}
trace:
@@ -141,6 +148,7 @@ trace:
sub r0, r0, #MCOUNT_INSN_SIZE
mov lr, pc
mov pc, r2
+ mov lr, r1 @ restore lr
ldmia sp!, {r0-r3, pc}
#endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 36f81d967979..6ff7919613d7 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -88,7 +88,7 @@ void set_fiq_handler(void *start, unsigned int length)
* disable irqs for the duration. Note - these functions are almost
* entirely coded in assembly.
*/
-void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
+void __naked set_fiq_regs(struct pt_regs *regs)
{
register unsigned long tmp;
asm volatile (
@@ -106,7 +106,7 @@ void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
-void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs)
+void __naked get_fiq_regs(struct pt_regs *regs)
{
register unsigned long tmp;
asm volatile (
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 363db186cb93..6874c7dca75a 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%3d: ", i);
for_each_present_cpu(cpu)
- seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
+ seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-");
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
.lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
};
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
/*
* do_IRQ handles all hardware IRQ's. Decoded IRQs should not
* come via this function. Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
#ifdef CONFIG_SMP
- bad_irq_desc.affinity = CPU_MASK_ALL;
+ cpumask_setall(bad_irq_desc.affinity);
bad_irq_desc.cpu = smp_processor_id();
#endif
init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
struct irq_desc *desc = irq_desc + i;
if (desc->cpu == cpu) {
- unsigned int newcpu = any_online_cpu(desc->affinity);
-
- if (newcpu == NR_CPUS) {
+ unsigned int newcpu = cpumask_any_and(desc->affinity,
+ cpu_online_mask);
+ if (newcpu >= nr_cpu_ids) {
if (printk_ratelimit())
printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
i, cpu);
- cpus_setall(desc->affinity);
- newcpu = any_online_cpu(desc->affinity);
+ cpumask_setall(desc->affinity);
+ newcpu = cpumask_any_and(desc->affinity,
+ cpu_online_mask);
}
route_irq(desc, i, newcpu);
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index dab48f27263f..bac03c81489d 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -22,6 +22,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
+#include <asm/unwind.h>
#ifdef CONFIG_XIP_KERNEL
/*
@@ -66,6 +67,24 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
char *secstrings,
struct module *mod)
{
+#ifdef CONFIG_ARM_UNWIND
+ Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+
+ for (s = sechdrs; s < sechdrs_end; s++) {
+ if (strcmp(".ARM.exidx.init.text", secstrings + s->sh_name) == 0)
+ mod->arch.unw_sec_init = s;
+ else if (strcmp(".ARM.exidx.devinit.text", secstrings + s->sh_name) == 0)
+ mod->arch.unw_sec_devinit = s;
+ else if (strcmp(".ARM.exidx", secstrings + s->sh_name) == 0)
+ mod->arch.unw_sec_core = s;
+ else if (strcmp(".init.text", secstrings + s->sh_name) == 0)
+ mod->arch.sec_init_text = s;
+ else if (strcmp(".devinit.text", secstrings + s->sh_name) == 0)
+ mod->arch.sec_devinit_text = s;
+ else if (strcmp(".text", secstrings + s->sh_name) == 0)
+ mod->arch.sec_core_text = s;
+ }
+#endif
return 0;
}
@@ -104,6 +123,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
loc = dstsec->sh_addr + rel->r_offset;
switch (ELF32_R_TYPE(rel->r_info)) {
+ case R_ARM_NONE:
+ /* ignore */
+ break;
+
case R_ARM_ABS32:
*(u32 *)loc += sym->st_value;
break;
@@ -132,6 +155,35 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
*(u32 *)loc |= offset & 0x00ffffff;
break;
+ case R_ARM_V4BX:
+ /* Preserve Rm and the condition code. Alter
+ * other bits to re-code instruction as
+ * MOV PC,Rm.
+ */
+ *(u32 *)loc &= 0xf000000f;
+ *(u32 *)loc |= 0x01a0f000;
+ break;
+
+ case R_ARM_PREL31:
+ offset = *(u32 *)loc + sym->st_value - loc;
+ *(u32 *)loc = offset & 0x7fffffff;
+ break;
+
+ case R_ARM_MOVW_ABS_NC:
+ case R_ARM_MOVT_ABS:
+ offset = *(u32 *)loc;
+ offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
+ offset = (offset ^ 0x8000) - 0x8000;
+
+ offset += sym->st_value;
+ if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
+ offset >>= 16;
+
+ *(u32 *)loc &= 0xfff0f000;
+ *(u32 *)loc |= ((offset & 0xf000) << 4) |
+ (offset & 0x0fff);
+ break;
+
default:
printk(KERN_ERR "%s: unknown relocation: %u\n",
module->name, ELF32_R_TYPE(rel->r_info));
@@ -150,14 +202,50 @@ apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
return -ENOEXEC;
}
+#ifdef CONFIG_ARM_UNWIND
+static void register_unwind_tables(struct module *mod)
+{
+ if (mod->arch.unw_sec_init && mod->arch.sec_init_text)
+ mod->arch.unwind_init =
+ unwind_table_add(mod->arch.unw_sec_init->sh_addr,
+ mod->arch.unw_sec_init->sh_size,
+ mod->arch.sec_init_text->sh_addr,
+ mod->arch.sec_init_text->sh_size);
+ if (mod->arch.unw_sec_devinit && mod->arch.sec_devinit_text)
+ mod->arch.unwind_devinit =
+ unwind_table_add(mod->arch.unw_sec_devinit->sh_addr,
+ mod->arch.unw_sec_devinit->sh_size,
+ mod->arch.sec_devinit_text->sh_addr,
+ mod->arch.sec_devinit_text->sh_size);
+ if (mod->arch.unw_sec_core && mod->arch.sec_core_text)
+ mod->arch.unwind_core =
+ unwind_table_add(mod->arch.unw_sec_core->sh_addr,
+ mod->arch.unw_sec_core->sh_size,
+ mod->arch.sec_core_text->sh_addr,
+ mod->arch.sec_core_text->sh_size);
+}
+
+static void unregister_unwind_tables(struct module *mod)
+{
+ unwind_table_del(mod->arch.unwind_init);
+ unwind_table_del(mod->arch.unwind_devinit);
+ unwind_table_del(mod->arch.unwind_core);
+}
+#else
+static inline void register_unwind_tables(struct module *mod) { }
+static inline void unregister_unwind_tables(struct module *mod) { }
+#endif
+
int
module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *module)
{
+ register_unwind_tables(module);
return 0;
}
void
module_arch_cleanup(struct module *mod)
{
+ unregister_unwind_tables(mod);
}
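
The new R_ARM_MOVW_ABS_NC/R_ARM_MOVT_ABS cases above pack a 16-bit immediate
into the imm4 (bits 19:16) and imm12 (bits 11:0) fields of a MOVW/MOVT
instruction. A stand-alone restatement of that arithmetic, with a
hypothetical helper name, looks like this sketch:

#include <stdint.h>

/* Sketch only: mirrors the MOVW/MOVT fixup in apply_relocate() above. */
static uint32_t fixup_movw_movt(uint32_t insn, uint32_t sym_value, int is_movt)
{
	int32_t offset;

	/* extract the signed 16-bit addend already encoded in the insn */
	offset = ((insn & 0xf0000) >> 4) | (insn & 0xfff);
	offset = (offset ^ 0x8000) - 0x8000;	/* sign-extend 16 -> 32 bits */

	offset += sym_value;			/* relocate against the symbol */
	if (is_movt)
		offset >>= 16;			/* MOVT carries the high half */

	/* re-insert the low 16 bits into the imm4:imm12 fields */
	insn &= 0xfff0f000;
	insn |= ((offset & 0xf000) << 4) | (offset & 0x0fff);
	return insn;
}
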
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d3ea6fa89521..c3265a2e7cd4 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -34,6 +34,7 @@
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/thread_notify.h>
+#include <asm/stacktrace.h>
#include <asm/mach/time.h>
static const char *processor_modes[] = {
@@ -82,7 +83,7 @@ static int __init hlt_setup(char *__unused)
__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
-void arm_machine_restart(char mode)
+void arm_machine_restart(char mode, const char *cmd)
{
/*
* Clean and disable cache, and turn off interrupts
@@ -99,7 +100,7 @@ void arm_machine_restart(char mode)
/*
* Now call the architecture specific reboot code.
*/
- arch_reset(mode);
+ arch_reset(mode, cmd);
/*
* Whoops - the architecture was unable to reboot.
@@ -119,7 +120,7 @@ EXPORT_SYMBOL(pm_idle);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
-void (*arm_pm_restart)(char str) = arm_machine_restart;
+void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);
@@ -194,9 +195,9 @@ void machine_power_off(void)
pm_power_off();
}
-void machine_restart(char * __unused)
+void machine_restart(char *cmd)
{
- arm_pm_restart(reboot_mode);
+ arm_pm_restart(reboot_mode, cmd);
}
void __show_regs(struct pt_regs *regs)
@@ -300,7 +301,7 @@ void release_thread(struct task_struct *dead_task)
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
int
-copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
+copy_thread(unsigned long clone_flags, unsigned long stack_start,
unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
{
struct thread_info *thread = task_thread_info(p);
@@ -372,23 +373,21 @@ EXPORT_SYMBOL(kernel_thread);
unsigned long get_wchan(struct task_struct *p)
{
- unsigned long fp, lr;
- unsigned long stack_start, stack_end;
+ struct stackframe frame;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack_start = (unsigned long)end_of_stack(p);
- stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;
-
- fp = thread_saved_fp(p);
+ frame.fp = thread_saved_fp(p);
+ frame.sp = thread_saved_sp(p);
+ frame.lr = 0; /* recovered from the stack */
+ frame.pc = thread_saved_pc(p);
do {
- if (fp < stack_start || fp > stack_end)
+ int ret = unwind_frame(&frame);
+ if (ret < 0)
return 0;
- lr = ((unsigned long *)fp)[-1];
- if (!in_sched_functions(lr))
- return lr;
- fp = *(unsigned long *) (fp - 12);
+ if (!in_sched_functions(frame.pc))
+ return frame.pc;
} while (count ++ < 16);
return 0;
}
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index df653ea59250..89882a1d0187 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -653,6 +653,54 @@ static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
}
#endif
+#ifdef CONFIG_VFP
+/*
+ * Get the child VFP state.
+ */
+static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data)
+{
+ struct thread_info *thread = task_thread_info(tsk);
+ union vfp_state *vfp = &thread->vfpstate;
+ struct user_vfp __user *ufp = data;
+
+ vfp_sync_state(thread);
+
+ /* copy the floating point registers */
+ if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs,
+ sizeof(vfp->hard.fpregs)))
+ return -EFAULT;
+
+ /* copy the status and control register */
+ if (put_user(vfp->hard.fpscr, &ufp->fpscr))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Set the child VFP state.
+ */
+static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
+{
+ struct thread_info *thread = task_thread_info(tsk);
+ union vfp_state *vfp = &thread->vfpstate;
+ struct user_vfp __user *ufp = data;
+
+ vfp_sync_state(thread);
+
+ /* copy the floating point registers */
+ if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs,
+ sizeof(vfp->hard.fpregs)))
+ return -EFAULT;
+
+ /* copy the status and control register */
+ if (get_user(vfp->hard.fpscr, &ufp->fpscr))
+ return -EFAULT;
+
+ return 0;
+}
+#endif
+
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int ret;
@@ -775,6 +823,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
#endif
+#ifdef CONFIG_VFP
+ case PTRACE_GETVFPREGS:
+ ret = ptrace_getvfpregs(child, (void __user *)data);
+ break;
+
+ case PTRACE_SETVFPREGS:
+ ret = ptrace_setvfpregs(child, (void __user *)data);
+ break;
+#endif
+
default:
ret = ptrace_request(child, request, addr, data);
break;
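
The two new requests expose the tracee's VFP state as a struct user_vfp
(32 double-precision registers plus FPSCR), copied to or from the address
passed in the ptrace data argument. From the tracer's side, usage would look
roughly like the sketch below; it assumes userspace headers that export
struct user_vfp, assumes 27 as the PTRACE_GETVFPREGS value, and a tracee
that is already stopped under ptrace.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/user.h>		/* struct user_vfp: fpregs[32], fpscr */

#ifndef PTRACE_GETVFPREGS
#define PTRACE_GETVFPREGS 27	/* value assumed from the ARM asm/ptrace.h */
#endif

static void dump_vfp(pid_t pid)
{
	struct user_vfp vfp;

	if (ptrace(PTRACE_GETVFPREGS, pid, NULL, &vfp) == -1) {
		perror("PTRACE_GETVFPREGS");
		return;
	}
	printf("fpscr = %#lx, d0 = %#llx\n",
	       vfp.fpscr, (unsigned long long)vfp.fpregs[0]);
}
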
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 68d6494c0389..bc5e4128f9f3 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -40,6 +40,7 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
+#include <asm/unwind.h>
#include "compat.h"
#include "atags.h"
@@ -685,6 +686,8 @@ void __init setup_arch(char **cmdline_p)
struct machine_desc *mdesc;
char *from = default_command_line;
+ unwind_init();
+
setup_processor();
mdesc = setup_machine(machine_arch_type);
machine_name = mdesc->name;
@@ -780,6 +783,8 @@ static const char *hwcap_str[] = {
"crunch",
"thumbee",
"neon",
+ "vfpv3",
+ "vfpv3d16",
NULL
};
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 55fa7ff96a3e..7801aac3c043 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -93,6 +93,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
+ flush_pmd_entry(pmd);
/*
* We need to tell the secondary core where to find
@@ -130,6 +131,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
secondary_data.pgdir = 0;
*pmd = __pmd(0);
+ clean_pmd_entry(pmd);
pgd_free(&init_mm, pgd);
if (ret) {
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index fc650f64df43..9f444e5cc165 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -2,35 +2,60 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
-#include "stacktrace.h"
-
-int walk_stackframe(unsigned long fp, unsigned long low, unsigned long high,
- int (*fn)(struct stackframe *, void *), void *data)
+#include <asm/stacktrace.h>
+
+#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
+/*
+ * Unwind the current stack frame and store the new register values in the
+ * structure passed as argument. Unwinding is equivalent to a function return,
+ * hence the new PC value rather than LR should be used for backtrace.
+ *
+ * With framepointer enabled, a simple function prologue looks like this:
+ * mov ip, sp
+ * stmdb sp!, {fp, ip, lr, pc}
+ * sub fp, ip, #4
+ *
+ * A simple function epilogue looks like this:
+ * ldm sp, {fp, sp, pc}
+ *
+ * Note that with framepointer enabled, even the leaf functions have the same
+ * prologue and epilogue, therefore we can ignore the LR value in this case.
+ */
+int unwind_frame(struct stackframe *frame)
{
- struct stackframe *frame;
-
- do {
- /*
- * Check current frame pointer is within bounds
- */
- if (fp < (low + 12) || fp + 4 >= high)
- break;
+ unsigned long high, low;
+ unsigned long fp = frame->fp;
- frame = (struct stackframe *)(fp - 12);
+ /* only go to a higher address on the stack */
+ low = frame->sp;
+ high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
- if (fn(frame, data))
- break;
+ /* check current frame pointer is within bounds */
+ if (fp < (low + 12) || fp + 4 >= high)
+ return -EINVAL;
- /*
- * Update the low bound - the next frame must always
- * be at a higher address than the current frame.
- */
- low = fp + 4;
- fp = frame->fp;
- } while (fp);
+ /* restore the registers from the stack frame */
+ frame->fp = *(unsigned long *)(fp - 12);
+ frame->sp = *(unsigned long *)(fp - 8);
+ frame->pc = *(unsigned long *)(fp - 4);
return 0;
}
+#endif
+
+void walk_stackframe(struct stackframe *frame,
+ int (*fn)(struct stackframe *, void *), void *data)
+{
+ while (1) {
+ int ret;
+
+ if (fn(frame, data))
+ break;
+ ret = unwind_frame(frame);
+ if (ret < 0)
+ break;
+ }
+}
EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
@@ -44,7 +69,7 @@ static int save_trace(struct stackframe *frame, void *d)
{
struct stack_trace_data *data = d;
struct stack_trace *trace = data->trace;
- unsigned long addr = frame->lr;
+ unsigned long addr = frame->pc;
if (data->no_sched_functions && in_sched_functions(addr))
return 0;
@@ -61,11 +86,10 @@ static int save_trace(struct stackframe *frame, void *d)
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
struct stack_trace_data data;
- unsigned long fp, base;
+ struct stackframe frame;
data.trace = trace;
data.skip = trace->skip;
- base = (unsigned long)task_stack_page(tsk);
if (tsk != current) {
#ifdef CONFIG_SMP
@@ -76,14 +100,22 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
BUG();
#else
data.no_sched_functions = 1;
- fp = thread_saved_fp(tsk);
+ frame.fp = thread_saved_fp(tsk);
+ frame.sp = thread_saved_sp(tsk);
+ frame.lr = 0; /* recovered from the stack */
+ frame.pc = thread_saved_pc(tsk);
#endif
} else {
+ register unsigned long current_sp asm ("sp");
+
data.no_sched_functions = 0;
- asm("mov %0, fp" : "=r" (fp));
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_sp;
+ frame.lr = (unsigned long)__builtin_return_address(0);
+ frame.pc = (unsigned long)save_stack_trace_tsk;
}
- walk_stackframe(fp, base, base + THREAD_SIZE, save_trace, &data);
+ walk_stackframe(&frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
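
With this change walk_stackframe() no longer takes raw fp/low/high values:
callers fill in a struct stackframe and pass a callback that is invoked once
per frame (a non-zero return stops the walk), with unwind_frame() advancing
between frames. A minimal sketch of a caller for the current task, using
hypothetical names and mirroring the current-task branch of
save_stack_trace_tsk() above:

#include <linux/kernel.h>
#include <asm/stacktrace.h>

static int print_frame(struct stackframe *frame, void *data)
{
	unsigned int *depth = data;

	printk("  #%u: pc = %08lx\n", (*depth)++, frame->pc);
	return 0;			/* keep walking; non-zero would stop */
}

static void example_dump_current(void)
{
	struct stackframe frame;
	unsigned int depth = 0;
	register unsigned long current_sp asm ("sp");

	frame.fp = (unsigned long)__builtin_frame_address(0);
	frame.sp = current_sp;
	frame.lr = (unsigned long)__builtin_return_address(0);
	frame.pc = (unsigned long)example_dump_current;

	walk_stackframe(&frame, print_frame, &depth);
}
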
diff --git a/arch/arm/kernel/stacktrace.h b/arch/arm/kernel/stacktrace.h
deleted file mode 100644
index e9fd20cb5662..000000000000
--- a/arch/arm/kernel/stacktrace.h
+++ /dev/null
@@ -1,9 +0,0 @@
-struct stackframe {
- unsigned long fp;
- unsigned long sp;
- unsigned long lr;
- unsigned long pc;
-};
-
-int walk_stackframe(unsigned long fp, unsigned long low, unsigned long high,
- int (*fn)(struct stackframe *, void *), void *data);
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 42623db7f870..d59a0cd537f0 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -83,6 +83,7 @@
#include <linux/net.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
+#include <linux/slab.h>
struct oldabi_stat64 {
unsigned long long st_dev;
@@ -176,21 +177,12 @@ asmlinkage long sys_oabi_fstatat64(int dfd,
int flag)
{
struct kstat stat;
- int error = -EINVAL;
+ int error;
- if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
- goto out;
-
- if (flag & AT_SYMLINK_NOFOLLOW)
- error = vfs_lstat_fd(dfd, filename, &stat);
- else
- error = vfs_stat_fd(dfd, filename, &stat);
-
- if (!error)
- error = cp_oldabi_stat64(&stat, statbuf);
-
-out:
- return error;
+ error = vfs_fstatat(dfd, filename, &stat, flag);
+ if (error)
+ return error;
+ return cp_oldabi_stat64(&stat, statbuf);
}
struct oabi_flock64 {
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index c68b44aa88d2..4cdc4a0bd02d 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -33,6 +33,7 @@
#include <asm/leds.h>
#include <asm/thread_info.h>
+#include <asm/stacktrace.h>
#include <asm/mach/time.h>
/*
@@ -55,14 +56,22 @@ EXPORT_SYMBOL(rtc_lock);
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
- unsigned long fp, pc = instruction_pointer(regs);
+ struct stackframe frame;
- if (in_lock_functions(pc)) {
- fp = regs->ARM_fp;
- pc = ((unsigned long *)fp)[-1];
- }
+ if (!in_lock_functions(regs->ARM_pc))
+ return regs->ARM_pc;
+
+ frame.fp = regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.lr = regs->ARM_lr;
+ frame.pc = regs->ARM_pc;
+ do {
+ int ret = unwind_frame(&frame);
+ if (ret < 0)
+ return 0;
+ } while (in_lock_functions(frame.pc));
- return pc;
+ return frame.pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 79abc4ddc0cf..57eb0f6f6005 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -27,6 +27,7 @@
#include <asm/system.h>
#include <asm/unistd.h>
#include <asm/traps.h>
+#include <asm/unwind.h>
#include "ptrace.h"
#include "signal.h"
@@ -61,6 +62,7 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
dump_mem("Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}
+#ifndef CONFIG_ARM_UNWIND
/*
* Stack pointers should always be within the kernels view of
* physical memory. If it is not there, then we can't dump
@@ -74,6 +76,7 @@ static int verify_stack(unsigned long sp)
return 0;
}
+#endif
/*
* Dump out the contents of some memory nicely...
@@ -150,13 +153,33 @@ static void dump_instr(struct pt_regs *regs)
set_fs(fs);
}
+#ifdef CONFIG_ARM_UNWIND
+static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+{
+ unwind_backtrace(regs, tsk);
+}
+#else
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
- unsigned int fp;
+ unsigned int fp, mode;
int ok = 1;
printk("Backtrace: ");
- fp = regs->ARM_fp;
+
+ if (!tsk)
+ tsk = current;
+
+ if (regs) {
+ fp = regs->ARM_fp;
+ mode = processor_mode(regs);
+ } else if (tsk != current) {
+ fp = thread_saved_fp(tsk);
+ mode = 0x10;
+ } else {
+ asm("mov %0, fp" : "=r" (fp) : : "cc");
+ mode = 0x10;
+ }
+
if (!fp) {
printk("no frame pointer");
ok = 0;
@@ -168,29 +191,20 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
printk("\n");
if (ok)
- c_backtrace(fp, processor_mode(regs));
+ c_backtrace(fp, mode);
}
+#endif
void dump_stack(void)
{
- __backtrace();
+ dump_backtrace(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
- unsigned long fp;
-
- if (!tsk)
- tsk = current;
-
- if (tsk != current)
- fp = thread_saved_fp(tsk);
- else
- asm("mov %0, fp" : "=r" (fp) : : "cc");
-
- c_backtrace(fp, 0x10);
+ dump_backtrace(NULL, tsk);
barrier();
}
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
new file mode 100644
index 000000000000..1dedc2c7ff49
--- /dev/null
+++ b/arch/arm/kernel/unwind.c
@@ -0,0 +1,434 @@
+/*
+ * arch/arm/kernel/unwind.c
+ *
+ * Copyright (C) 2008 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * Stack unwinding support for ARM
+ *
+ * An ARM EABI version of gcc is required to generate the unwind
+ * tables. For information about the structure of the unwind tables,
+ * see "Exception Handling ABI for the ARM Architecture" at:
+ *
+ * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+#include <asm/stacktrace.h>
+#include <asm/traps.h>
+#include <asm/unwind.h>
+
+/* Dummy functions to avoid linker complaints */
+void __aeabi_unwind_cpp_pr0(void)
+{
+};
+EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0);
+
+void __aeabi_unwind_cpp_pr1(void)
+{
+};
+EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1);
+
+void __aeabi_unwind_cpp_pr2(void)
+{
+};
+EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
+
+struct unwind_ctrl_block {
+ unsigned long vrs[16]; /* virtual register set */
+ unsigned long *insn; /* pointer to the current instructions word */
+ int entries; /* number of entries left to interpret */
+ int byte; /* current byte number in the instructions word */
+};
+
+enum regs {
+ FP = 11,
+ SP = 13,
+ LR = 14,
+ PC = 15
+};
+
+extern struct unwind_idx __start_unwind_idx[];
+extern struct unwind_idx __stop_unwind_idx[];
+
+static DEFINE_SPINLOCK(unwind_lock);
+static LIST_HEAD(unwind_tables);
+
+/* Convert a prel31 symbol to an absolute address */
+#define prel31_to_addr(ptr) \
+({ \
+ /* sign-extend to 32 bits */ \
+ long offset = (((long)*(ptr)) << 1) >> 1; \
+ (unsigned long)(ptr) + offset; \
+})
+
+/*
+ * Binary search in the unwind index. The entries are
+ * guaranteed to be sorted in ascending order by the linker.
+ */
+static struct unwind_idx *search_index(unsigned long addr,
+ struct unwind_idx *first,
+ struct unwind_idx *last)
+{
+ pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
+
+ if (addr < first->addr) {
+ pr_warning("unwind: Unknown symbol address %08lx\n", addr);
+ return NULL;
+ } else if (addr >= last->addr)
+ return last;
+
+ while (first < last - 1) {
+ struct unwind_idx *mid = first + ((last - first + 1) >> 1);
+
+ if (addr < mid->addr)
+ last = mid;
+ else
+ first = mid;
+ }
+
+ return first;
+}
+
+static struct unwind_idx *unwind_find_idx(unsigned long addr)
+{
+ struct unwind_idx *idx = NULL;
+ unsigned long flags;
+
+ pr_debug("%s(%08lx)\n", __func__, addr);
+
+ if (core_kernel_text(addr))
+ /* main unwind table */
+ idx = search_index(addr, __start_unwind_idx,
+ __stop_unwind_idx - 1);
+ else {
+ /* module unwind tables */
+ struct unwind_table *table;
+
+ spin_lock_irqsave(&unwind_lock, flags);
+ list_for_each_entry(table, &unwind_tables, list) {
+ if (addr >= table->begin_addr &&
+ addr < table->end_addr) {
+ idx = search_index(addr, table->start,
+ table->stop - 1);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&unwind_lock, flags);
+ }
+
+ pr_debug("%s: idx = %p\n", __func__, idx);
+ return idx;
+}
+
+static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
+{
+ unsigned long ret;
+
+ if (ctrl->entries <= 0) {
+ pr_warning("unwind: Corrupt unwind table\n");
+ return 0;
+ }
+
+ ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff;
+
+ if (ctrl->byte == 0) {
+ ctrl->insn++;
+ ctrl->entries--;
+ ctrl->byte = 3;
+ } else
+ ctrl->byte--;
+
+ return ret;
+}
+
+/*
+ * Execute the current unwind instruction.
+ */
+static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
+{
+ unsigned long insn = unwind_get_byte(ctrl);
+
+ pr_debug("%s: insn = %08lx\n", __func__, insn);
+
+ if ((insn & 0xc0) == 0x00)
+ ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
+ else if ((insn & 0xc0) == 0x40)
+ ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
+ else if ((insn & 0xf0) == 0x80) {
+ unsigned long mask;
+ unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
+ int load_sp, reg = 4;
+
+ insn = (insn << 8) | unwind_get_byte(ctrl);
+ mask = insn & 0x0fff;
+ if (mask == 0) {
+ pr_warning("unwind: 'Refuse to unwind' instruction %04lx\n",
+ insn);
+ return -URC_FAILURE;
+ }
+
+ /* pop R4-R15 according to mask */
+ load_sp = mask & (1 << (13 - 4));
+ while (mask) {
+ if (mask & 1)
+ ctrl->vrs[reg] = *vsp++;
+ mask >>= 1;
+ reg++;
+ }
+ if (!load_sp)
+ ctrl->vrs[SP] = (unsigned long)vsp;
+ } else if ((insn & 0xf0) == 0x90 &&
+ (insn & 0x0d) != 0x0d)
+ ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
+ else if ((insn & 0xf0) == 0xa0) {
+ unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
+ int reg;
+
+ /* pop R4-R[4+bbb] */
+ for (reg = 4; reg <= 4 + (insn & 7); reg++)
+ ctrl->vrs[reg] = *vsp++;
+ if (insn & 0x80)
+ ctrl->vrs[14] = *vsp++;
+ ctrl->vrs[SP] = (unsigned long)vsp;
+ } else if (insn == 0xb0) {
+ ctrl->vrs[PC] = ctrl->vrs[LR];
+ /* no further processing */
+ ctrl->entries = 0;
+ } else if (insn == 0xb1) {
+ unsigned long mask = unwind_get_byte(ctrl);
+ unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
+ int reg = 0;
+
+ if (mask == 0 || mask & 0xf0) {
+ pr_warning("unwind: Spare encoding %04lx\n",
+ (insn << 8) | mask);
+ return -URC_FAILURE;
+ }
+
+ /* pop R0-R3 according to mask */
+ while (mask) {
+ if (mask & 1)
+ ctrl->vrs[reg] = *vsp++;
+ mask >>= 1;
+ reg++;
+ }
+ ctrl->vrs[SP] = (unsigned long)vsp;
+ } else if (insn == 0xb2) {
+ unsigned long uleb128 = unwind_get_byte(ctrl);
+
+ ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
+ } else {
+ pr_warning("unwind: Unhandled instruction %02lx\n", insn);
+ return -URC_FAILURE;
+ }
+
+ pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__,
+ ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]);
+
+ return URC_OK;
+}
+
+/*
+ * Unwind a single frame starting with the register values in *frame and
+ * update *frame (fp, sp, lr, pc) with the unwound values.
+ */
+int unwind_frame(struct stackframe *frame)
+{
+ unsigned long high, low;
+ struct unwind_idx *idx;
+ struct unwind_ctrl_block ctrl;
+
+ /* only go to a higher address on the stack */
+ low = frame->sp;
+ high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+
+ pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
+ frame->pc, frame->lr, frame->sp);
+
+ if (!kernel_text_address(frame->pc))
+ return -URC_FAILURE;
+
+ idx = unwind_find_idx(frame->pc);
+ if (!idx) {
+ pr_warning("unwind: Index not found %08lx\n", frame->pc);
+ return -URC_FAILURE;
+ }
+
+ ctrl.vrs[FP] = frame->fp;
+ ctrl.vrs[SP] = frame->sp;
+ ctrl.vrs[LR] = frame->lr;
+ ctrl.vrs[PC] = 0;
+
+ if (idx->insn == 1)
+ /* can't unwind */
+ return -URC_FAILURE;
+ else if ((idx->insn & 0x80000000) == 0)
+ /* prel31 to the unwind table */
+ ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
+ else if ((idx->insn & 0xff000000) == 0x80000000)
+ /* only personality routine 0 supported in the index */
+ ctrl.insn = &idx->insn;
+ else {
+ pr_warning("unwind: Unsupported personality routine %08lx in the index at %p\n",
+ idx->insn, idx);
+ return -URC_FAILURE;
+ }
+
+ /* check the personality routine */
+ if ((*ctrl.insn & 0xff000000) == 0x80000000) {
+ ctrl.byte = 2;
+ ctrl.entries = 1;
+ } else if ((*ctrl.insn & 0xff000000) == 0x81000000) {
+ ctrl.byte = 1;
+ ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);
+ } else {
+ pr_warning("unwind: Unsupported personality routine %08lx at %p\n",
+ *ctrl.insn, ctrl.insn);
+ return -URC_FAILURE;
+ }
+
+ while (ctrl.entries > 0) {
+ int urc;
+
+ if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
+ return -URC_FAILURE;
+ urc = unwind_exec_insn(&ctrl);
+ if (urc < 0)
+ return urc;
+ }
+
+ if (ctrl.vrs[PC] == 0)
+ ctrl.vrs[PC] = ctrl.vrs[LR];
+
+ frame->fp = ctrl.vrs[FP];
+ frame->sp = ctrl.vrs[SP];
+ frame->lr = ctrl.vrs[LR];
+ frame->pc = ctrl.vrs[PC];
+
+ return URC_OK;
+}
+
+void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+{
+ struct stackframe frame;
+ unsigned long high, low;
+ register unsigned long current_sp asm ("sp");
+
+ pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+ if (!tsk)
+ tsk = current;
+
+ if (regs) {
+ frame.fp = regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.lr = regs->ARM_lr;
+ frame.pc = regs->ARM_pc;
+ } else if (tsk == current) {
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_sp;
+ frame.lr = (unsigned long)__builtin_return_address(0);
+ frame.pc = (unsigned long)unwind_backtrace;
+ } else {
+ /* task blocked in __switch_to */
+ frame.fp = thread_saved_fp(tsk);
+ frame.sp = thread_saved_sp(tsk);
+ /*
+ * The function calling __switch_to cannot be a leaf function
+ * so LR is recovered from the stack.
+ */
+ frame.lr = 0;
+ frame.pc = thread_saved_pc(tsk);
+ }
+
+ low = frame.sp & ~(THREAD_SIZE - 1);
+ high = low + THREAD_SIZE;
+
+ while (1) {
+ int urc;
+ unsigned long where = frame.pc;
+
+ urc = unwind_frame(&frame);
+ if (urc < 0)
+ break;
+ dump_backtrace_entry(where, frame.pc, frame.sp - 4);
+ }
+}
+
+struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
+ unsigned long text_addr,
+ unsigned long text_size)
+{
+ unsigned long flags;
+ struct unwind_idx *idx;
+ struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
+
+ pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
+ text_addr, text_size);
+
+ if (!tab)
+ return tab;
+
+ tab->start = (struct unwind_idx *)start;
+ tab->stop = (struct unwind_idx *)(start + size);
+ tab->begin_addr = text_addr;
+ tab->end_addr = text_addr + text_size;
+
+ /* Convert the symbol addresses to absolute values */
+ for (idx = tab->start; idx < tab->stop; idx++)
+ idx->addr = prel31_to_addr(&idx->addr);
+
+ spin_lock_irqsave(&unwind_lock, flags);
+ list_add_tail(&tab->list, &unwind_tables);
+ spin_unlock_irqrestore(&unwind_lock, flags);
+
+ return tab;
+}
+
+void unwind_table_del(struct unwind_table *tab)
+{
+ unsigned long flags;
+
+ if (!tab)
+ return;
+
+ spin_lock_irqsave(&unwind_lock, flags);
+ list_del(&tab->list);
+ spin_unlock_irqrestore(&unwind_lock, flags);
+
+ kfree(tab);
+}
+
+int __init unwind_init(void)
+{
+ struct unwind_idx *idx;
+
+ /* Convert the symbol addresses to absolute values */
+ for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
+ idx->addr = prel31_to_addr(&idx->addr);
+
+ pr_debug("unwind: ARM stack unwinding initialised\n");
+
+ return 0;
+}
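
Both the unwind index conversion above and the R_ARM_PREL31 module relocation
use the same 31-bit place-relative encoding that prel31_to_addr() decodes: a
word holds a signed 31-bit offset from its own address, and the macro
sign-extends bits 30..0 before adding the word's address. A small stand-alone
sketch of the round trip (hypothetical helper names, relying as the kernel
does on arithmetic right shift of signed values):

#include <stdint.h>

/* Sketch only: the prel31 encode/decode used by the unwind tables. */
static uint32_t prel31_encode(uint32_t target, uint32_t place)
{
	return (target - place) & 0x7fffffff;
}

static uint32_t prel31_decode(uint32_t word, uint32_t place)
{
	int32_t offset = ((int32_t)(word << 1)) >> 1;	/* sign-extend 31 -> 32 bits */

	return place + (uint32_t)offset;
}

/*
 * Example: an index word at 0xc0010000 containing 0x7ffffff0 (offset -16)
 * refers to the address 0xc000fff0.
 */
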
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 00216071eaf7..c90f27250ead 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -64,7 +64,9 @@ SECTIONS
__initramfs_end = .;
#endif
. = ALIGN(4096);
+ __per_cpu_load = .;
__per_cpu_start = .;
+ *(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;
@@ -80,6 +82,8 @@ SECTIONS
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.ARM.exidx.exit.text)
+ *(.ARM.extab.exit.text)
#ifndef CONFIG_MMU
*(.fixup)
*(__ex_table)
@@ -110,6 +114,23 @@ SECTIONS
_etext = .; /* End of text and rodata section */
+#ifdef CONFIG_ARM_UNWIND
+ /*
+ * Stack unwinding tables
+ */
+ . = ALIGN(8);
+ .ARM.unwind_idx : {
+ __start_unwind_idx = .;
+ *(.ARM.exidx*)
+ __stop_unwind_idx = .;
+ }
+ .ARM.unwind_tab : {
+ __start_unwind_tab = .;
+ *(.ARM.extab*)
+ __stop_unwind_tab = .;
+ }
+#endif
+
#ifdef CONFIG_XIP_KERNEL
__data_loc = ALIGN(4); /* location in binary */
. = PAGE_OFFSET + TEXT_OFFSET;