Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/Kconfig                        |  2
-rw-r--r--  arch/sh/boards/mach-ecovec24/setup.c   |  1
-rw-r--r--  arch/sh/include/asm/Kbuild             |  1
-rw-r--r--  arch/sh/include/asm/fpu.h              |  2
-rw-r--r--  arch/sh/include/asm/mmu_context.h      |  2
-rw-r--r--  arch/sh/include/asm/pgalloc.h          |  5
-rw-r--r--  arch/sh/include/asm/processor_32.h     | 10
-rw-r--r--  arch/sh/include/asm/processor_64.h     | 10
-rw-r--r--  arch/sh/include/asm/thread_info.h      |  2
-rw-r--r--  arch/sh/kernel/cpu/fpu.c               |  2
-rw-r--r--  arch/sh/kernel/entry-common.S          |  6
-rw-r--r--  arch/sh/kernel/irq.c                   | 57
-rw-r--r--  arch/sh/kernel/process_32.c            |  6
-rw-r--r--  arch/sh/kernel/process_64.c            |  4
-rw-r--r--  arch/sh/mm/init.c                      |  2
15 files changed, 59 insertions, 53 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 224f4bc9925e..9b0979f4df7a 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -1,5 +1,6 @@
 config SUPERH
 	def_bool y
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select EXPERT
 	select CLKDEV_LOOKUP
 	select HAVE_IDE if HAS_IOPORT
@@ -711,7 +712,6 @@ config CC_STACKPROTECTOR
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on SYS_SUPPORTS_SMP
-	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If

diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 23d7e45f9d14..5bc3a15465c7 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -15,6 +15,7 @@
 #include <linux/mmc/sh_mmcif.h>
 #include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mtd/physmap.h>
+#include <linux/mfd/tmio.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>

diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 280bea9e5e2b..231efbb68108 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -34,3 +34,4 @@ generic-y += termios.h
 generic-y += trace_clock.h
 generic-y += ucontext.h
 generic-y += xor.h
+generic-y += preempt.h

diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 06c4281aab65..09fc2bc8a790 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -46,7 +46,7 @@ static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 		save_fpu(tsk);
 		release_fpu(regs);
 	} else
-		tsk->fpu_counter = 0;
+		tsk->thread.fpu_counter = 0;
 }
 
 static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)

diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 21c5088788da..b9d9489a5012 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -81,7 +81,7 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
 
 		/*
 		 * Fix version; Note that we avoid version #0
-		 * to distingush NO_CONTEXT.
+		 * to distinguish NO_CONTEXT.
 		 */
 		if (!asid)
 			asid = MMU_CONTEXT_FIRST_VERSION;

diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 8c00785c60d5..a33673b3687d 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -47,7 +47,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	if (!pg)
 		return NULL;
 	page = virt_to_page(pg);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		quicklist_free(QUICK_PT, NULL, pg);
+		return NULL;
+	}
 	return page;
 }
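The pgalloc.h hunk above adds rollback on constructor failure: pgtable_page_ctor() can now fail, so the freshly allocated page must be returned to the quicklist before reporting failure to the caller. A minimal user-space sketch of the same allocate/construct/roll-back pattern; page_ctor() and page_pool_free() are hypothetical stand-ins for pgtable_page_ctor() and quicklist_free(), not kernel APIs:

    #include <stdlib.h>
    #include <stdbool.h>

    /* Stand-in for the constructor step, which may fail. */
    static bool page_ctor(void *pg)
    {
        return pg != NULL;
    }

    /* Stand-in for returning the raw allocation to its pool. */
    static void page_pool_free(void *pg)
    {
        free(pg);
    }

    void *pte_alloc_one_sketch(size_t size)
    {
        void *pg = calloc(1, size);     /* stand-in for the quicklist alloc */

        if (!pg)
            return NULL;
        if (!page_ctor(pg)) {           /* constructor failed: roll back */
            page_pool_free(pg);
            return NULL;
        }
        return pg;
    }

The point of the change is that the allocation must not leak, and the caller must not receive a page whose constructor never ran.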
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index e699a12cdcca..18e0377f72bb 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -111,6 +111,16 @@ struct thread_struct {
 
 	/* Extended processor state */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_THREAD {						\

diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 1cc7d3197143..eedd4f625d07 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -126,6 +126,16 @@ struct thread_struct {
 
 	/* floating point info */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_MMAP \

diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 45a93669289d..ad27ffa65e2e 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -41,8 +41,6 @@ struct thread_info {
 
 #endif
 
-#define PREEMPT_ACTIVE		0x10000000
-
 #if defined(CONFIG_4KSTACKS)
 #define THREAD_SHIFT	12
 #else

diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
index f8f7af51c128..4e332244ea75 100644
--- a/arch/sh/kernel/cpu/fpu.c
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -44,7 +44,7 @@ void __fpu_state_restore(void)
 	restore_fpu(tsk);
 
 	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
+	tsk->thread.fpu_counter++;
 }
 
 void fpu_state_restore(struct pt_regs *regs)

diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 9b6e4beeb296..ca46834294b7 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -108,7 +108,7 @@ need_resched:
 	and	#(0xf0>>1), r0		! interrupts off (exception path)?
 	cmp/eq	#(0xf0>>1), r0
 	bt	noresched
-	mov.l	3f, r0
+	mov.l	1f, r0
 	jsr	@r0			! call preempt_schedule_irq
 	 nop
 	bra	need_resched
@@ -119,9 +119,7 @@ noresched:
 	 nop
 
 	.align 2
-1:	.long	PREEMPT_ACTIVE
-2:	.long	schedule
-3:	.long	preempt_schedule_irq
+1:	.long	preempt_schedule_irq
 #endif
 
 ENTRY(resume_userspace)
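The fpu_counter comment added to processor_32.h and processor_64.h describes a wrap-around heuristic: once the counter passes a threshold (the "> 5" tests in process_32.c below), the FPU state is restored eagerly at context switch, and the deliberate unsigned char overflow drops a task back to lazy mode after 256 consecutive FPU-using switches. A small stand-alone sketch of that behavior; the struct and FPU_EAGER_THRESHOLD name are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    #define FPU_EAGER_THRESHOLD 5       /* mirrors the "> 5" test in __switch_to() */

    struct task_sketch {
        unsigned char fpu_counter;      /* wraps to 0 after 256 FPU-using switches */
    };

    static bool should_restore_eagerly(const struct task_sketch *t)
    {
        return t->fpu_counter > FPU_EAGER_THRESHOLD;
    }

    int main(void)
    {
        struct task_sketch t = { .fpu_counter = 0 };

        for (int sw = 0; sw < 600; sw++) {
            t.fpu_counter++;            /* this slice used the FPU */
            if (sw % 100 == 0)
                printf("switch %3d: counter=%3u eager=%d\n",
                       sw, (unsigned)t.fpu_counter,
                       should_restore_eagerly(&t));
        }
        return 0;
    }

Running this shows the counter climbing past the threshold, wrapping at 256, and briefly re-entering lazy mode, which is exactly the bursty-workload case the comment calls out.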
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 063af10ff3c1..0833736afa32 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -149,47 +149,32 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
-		irqctx->tinfo.task = curctx->task;
-		irqctx->tinfo.previous_sp = current_stack_pointer;
-
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
-
-		__asm__ __volatile__ (
-			"mov	r15, r9		\n"
-			"jsr	@%0		\n"
-			/* switch to the softirq stack */
-			" mov	%1, r15		\n"
-			/* restore the thread stack */
-			"mov	r9, r15		\n"
-			: /* no outputs */
-			: "r" (__do_softirq), "r" (isp)
-			: "memory", "r0", "r1", "r2", "r3", "r4",
-			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
-		);
-
-		/*
-		 * Shouldn't happen, we returned above if in_interrupt():
-		 */
-		WARN_ON_ONCE(softirq_count());
-	}
-
-	local_irq_restore(flags);
+	curctx = current_thread_info();
+	irqctx = softirq_ctx[smp_processor_id()];
+	irqctx->tinfo.task = curctx->task;
+	irqctx->tinfo.previous_sp = current_stack_pointer;
+
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+	__asm__ __volatile__ (
+		"mov	r15, r9		\n"
+		"jsr	@%0		\n"
+		/* switch to the softirq stack */
+		" mov	%1, r15		\n"
+		/* restore the thread stack */
+		"mov	r9, r15		\n"
+		: /* no outputs */
+		: "r" (__do_softirq), "r" (isp)
+		: "memory", "r0", "r1", "r2", "r3", "r4",
+		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+	);
 }
 
 #else
 static inline void handle_one_irq(unsigned int irq)
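The irq.c hunk strips the interrupt-state and pending checks out of the old do_softirq() and keeps only the stack switch, since the generic softirq code now performs those checks before calling do_softirq_own_stack(). The inline assembly swaps r15 (the SH stack pointer) to the dedicated softirq stack around the __do_softirq() call. A rough user-space analogue of running a handler on a private stack, using the obsolescent but standard POSIX ucontext API instead of hand-written assembly:

    #include <stdio.h>
    #include <stdlib.h>
    #include <ucontext.h>

    #define SOFTIRQ_STACK_SIZE (64 * 1024)

    static ucontext_t main_ctx, handler_ctx;

    /* Stand-in for __do_softirq(); prints where its stack lives. */
    static void softirq_handler(void)
    {
        int marker;
        printf("handler running near stack address %p\n", (void *)&marker);
    }

    int main(void)
    {
        int marker;
        void *stack = malloc(SOFTIRQ_STACK_SIZE);

        if (!stack)
            return 1;
        getcontext(&handler_ctx);
        handler_ctx.uc_stack.ss_sp = stack;
        handler_ctx.uc_stack.ss_size = SOFTIRQ_STACK_SIZE;
        handler_ctx.uc_link = &main_ctx;   /* return here when handler ends */
        makecontext(&handler_ctx, softirq_handler, 0);

        printf("caller running near stack address  %p\n", (void *)&marker);
        swapcontext(&main_ctx, &handler_ctx); /* switch stacks, run handler */
        free(stack);
        return 0;
    }

The two printed addresses land in different regions, which is the whole effect of the r15 swap in the kernel version: the handler's frames are built on the dedicated stack, not the interrupted thread's.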
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index ebd3933005b4..2885fc9d9dcd 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif
 		ti->addr_limit = KERNEL_DS;
 		ti->status &= ~TS_USEDFPU;
-		p->fpu_counter = 0;
+		p->thread.fpu_counter = 0;
 		return 0;
 	}
 	*childregs = *current_pt_regs();
@@ -189,7 +189,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
@@ -207,7 +207,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		__fpu_state_restore();
 
 	return prev;

diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 174d124b419e..e2062e643341 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -374,7 +374,7 @@ asmlinkage void ret_from_kernel_thread(void);
 int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long arg, struct task_struct *p)
 {
-	struct pt_regs *childregs, *regs = current_pt_regs();
+	struct pt_regs *childregs;
 
 #ifdef CONFIG_SH_FPU
 	/* can't happen for a kernel thread */
@@ -393,7 +393,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->regs[2] = (unsigned long)arg;
-		childregs->regs[3] = (unsigned long)fn;
+		childregs->regs[3] = (unsigned long)usp;
 		childregs->sr = (1 << 30); /* not user_mode */
 		childregs->sr |= SR_FD; /* Invalidate FPU flag */
 		p->thread.pc = (unsigned long) ret_from_kernel_thread;

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 33890fd267cb..2d089fe2cba9 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -231,7 +231,7 @@ static void __init bootmem_init_one_node(unsigned int nid)
 	if (!p->node_spanned_pages)
 		return;
 
-	end_pfn = p->node_start_pfn + p->node_spanned_pages;
+	end_pfn = pgdat_end_pfn(p);
 
 	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
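The mm/init.c hunk replaces the open-coded end-of-node calculation with the pgdat_end_pfn() helper, which computes the same sum. A sketch of what that helper evaluates to, with the node structure reduced to the two fields involved; the _sketch names are illustrative, not kernel identifiers:

    /* The two pglist_data fields the calculation needs. */
    struct pglist_data_sketch {
        unsigned long node_start_pfn;      /* first page frame of the node */
        unsigned long node_spanned_pages;  /* frames spanned, holes included */
    };

    /* Mirrors the helper used above: one past the node's last page frame. */
    static unsigned long pgdat_end_pfn_sketch(const struct pglist_data_sketch *p)
    {
        return p->node_start_pfn + p->node_spanned_pages;
    }

The change is purely a readability cleanup: callers name the quantity they want instead of repeating the arithmetic.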