| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-20 12:34:36 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-20 12:34:36 -0700 |
| commit | 608faf1ff2341dd89307eea649c8efaa6d08b911 (patch) | |
| tree | b5e67ff0a8e8470de2cf1a8fc9edc83483fc406b /arch | |
| parent | a939b96cccdb65df80a52447ec8e4a6d79c56dbb (diff) | |
| parent | 05f0ecbda5c215279f8e0f852e1606f441a11236 (diff) | |
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] fix allmodconfig compilation breakage.
[IA64] smp_flush_tlb_mm() should only send IPI's to cpus in cpu_vm_mask
[IA64] export smp_send_reschedule
Diffstat (limited to 'arch')
| -rw-r--r-- | arch/ia64/include/asm/paravirt_privop.h | 2 |
| -rw-r--r-- | arch/ia64/kernel/smp.c | 14 |
2 files changed, 6 insertions, 10 deletions
diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h
index 3d2951130b5f..8f6cb11c9fae 100644
--- a/arch/ia64/include/asm/paravirt_privop.h
+++ b/arch/ia64/include/asm/paravirt_privop.h
@@ -445,7 +445,6 @@ paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
 	register unsigned long ia64_intri_res asm ("r8");		\
 	register unsigned long __reg asm ("r8") = (reg);		\
 									\
-	BUILD_BUG_ON(!__builtin_constant_p(reg));			\
 	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,		\
 					  PARAVIRT_TYPE(GETREG)		\
 					  + (reg))			\
@@ -464,7 +463,6 @@ paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
 	register unsigned long ia64_clobber1 asm ("r8");		\
 	register unsigned long ia64_clobber2 asm ("r9");		\
 									\
-	BUILD_BUG_ON(!__builtin_constant_p(reg));			\
 	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,		\
 					  PARAVIRT_TYPE(SETREG)		\
 					  + (reg))			\
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 2ea4199d9c57..5230eaafd83f 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -225,6 +225,7 @@ smp_send_reschedule (int cpu)
 {
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
 
 /*
  * Called with preemption disabled.
@@ -300,15 +301,12 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		return;
 	}
 
+	smp_call_function_mask(mm->cpu_vm_mask,
+		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	local_irq_disable();
+	local_finish_flush_tlb_mm(mm);
+	local_irq_enable();
 	preempt_enable();
-	/*
-	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
-	 * have been running in the address space.  It's not clear that this is worth the
-	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
-	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
-	 * rather trivial.
-	 */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
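As a usage note on the smp_send_reschedule() export above: once the symbol is EXPORT_SYMBOL_GPL'd, GPL-licensed modules can resolve it at load time and send a reschedule IPI to another CPU. The sketch below is hypothetical and not part of this merge; the module name, init function, and choice of target CPU are illustrative only.

```c
/*
 * Hypothetical out-of-tree module sketch -- not part of this merge.
 * It only illustrates that EXPORT_SYMBOL_GPL(smp_send_reschedule)
 * lets modular code link against the symbol.
 */
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

static int __init resched_kick_init(void)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	/* Send a reschedule IPI to the first online CPU. */
	smp_send_reschedule(cpu);
	return 0;
}

static void __exit resched_kick_exit(void)
{
}

module_init(resched_kick_init);
module_exit(resched_kick_exit);
MODULE_LICENSE("GPL");
```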