Diffstat (limited to 'arch/parisc/kernel/irq.c')
-rw-r--r--  arch/parisc/kernel/irq.c  110
1 files changed, 92 insertions, 18 deletions
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 006385dbee66..197936d9359a 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -30,6 +30,9 @@
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <asm/io.h>
+
+#include <asm/smp.h>
#undef PARISC_IRQ_CR16_COUNTS
@@ -43,26 +46,34 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
*/
static volatile unsigned long cpu_eiem = 0;
-static void cpu_set_eiem(void *info)
-{
- set_eiem((unsigned long) info);
-}
-
-static inline void cpu_disable_irq(unsigned int irq)
+static void cpu_disable_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
cpu_eiem &= ~eirr_bit;
- on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+ /* Do nothing on the other CPUs. If they get this interrupt,
+ * the & cpu_eiem in do_cpu_irq_mask() ensures they won't
+ * handle it, and the set_eiem() at the bottom will ensure it
+ * then gets disabled */
}
static void cpu_enable_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
- mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
cpu_eiem |= eirr_bit;
- on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+
+ /* FIXME: while our interrupts aren't nested, we cannot reset
+ * the eiem mask if we're already in an interrupt. Once we
+ * implement nested interrupts, this can go away
+ */
+ if (!in_interrupt())
+ set_eiem(cpu_eiem);
+
+ /* This is just a simple NOP IPI. But what it does is cause
+ * all the other CPUs to do a set_eiem(cpu_eiem) at the end
+ * of the interrupt handler */
+ smp_send_all_nop();
}
static unsigned int cpu_startup_irq(unsigned int irq)
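For context, the lazy masking above only works because every CPU rewrites its EIEM register from the shared cpu_eiem variable when it leaves do_cpu_irq_mask() (see the set_eiem(cpu_eiem) near the end of that function, later in this patch). A minimal sketch of what the NOP IPI achieves on a remote CPU, assuming only the helpers named in the comments; the function below is illustrative and not part of the patch:

/*
 * Illustrative only: what happens on a remote CPU after
 * smp_send_all_nop().  The IPI handler does no real work for the
 * NOP case, but merely taking the interrupt sends the CPU through
 * do_cpu_irq_mask(), whose exit path reloads the hardware mask.
 */
static void sketch_remote_nop_ipi(void)
{
	/* ... EIRR dispatch loop runs (and finds nothing to do) ... */
	set_eiem(cpu_eiem);	/* remote CPU now sees the updated cpu_eiem */
}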
@@ -74,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq)
void no_ack_irq(unsigned int irq) { }
void no_end_irq(unsigned int irq) { }
+#ifdef CONFIG_SMP
+int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+{
+ int cpu_dest;
+
+ /* timer and ipi have to always be received on all CPUs */
+ if (irq == TIMER_IRQ || irq == IPI_IRQ) {
+ /* Bad linux design decision. The mask has already
+ * been set; we must reset it */
+ irq_affinity[irq] = CPU_MASK_ALL;
+ return -EINVAL;
+ }
+
+ /* whatever mask they set, we just allow one CPU */
+ cpu_dest = first_cpu(*dest);
+ *dest = cpumask_of_cpu(cpu_dest);
+
+ return 0;
+}
+
+static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+{
+ if (cpu_check_affinity(irq, &dest))
+ return;
+
+ irq_affinity[irq] = dest;
+}
+#endif
+
static struct hw_interrupt_type cpu_interrupt_type = {
.typename = "CPU",
.startup = cpu_startup_irq,
@@ -82,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = {
.disable = cpu_disable_irq,
.ack = no_ack_irq,
.end = no_end_irq,
-// .set_affinity = cpu_set_affinity_irq,
+#ifdef CONFIG_SMP
+ .set_affinity = cpu_set_affinity_irq,
+#endif
};
int show_interrupts(struct seq_file *p, void *v)
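The "mask has already been set" complaint in cpu_check_affinity() refers to the generic /proc/irq/<n>/smp_affinity write path, which in 2.6-era kernels updates irq_affinity[] itself before calling the ->set_affinity hook registered above. A simplified sketch of that caller (paraphrased from the generic IRQ proc code of the period, not a verbatim quote):

/*
 * Simplified sketch of the generic proc write path.  Note that
 * irq_affinity[irq] is assigned *before* ->set_affinity() runs,
 * which is why cpu_check_affinity() has to overwrite it with
 * CPU_MASK_ALL again for the timer and IPI interrupts.
 */
static int sketch_irq_affinity_write(unsigned int irq, cpumask_t new_value)
{
	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	irq_affinity[irq] = new_value;			/* generic layer sets it first... */
	irq_desc[irq].handler->set_affinity(irq, new_value); /* ...then tells the chip */
	return 0;
}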
@@ -219,6 +261,17 @@ int txn_alloc_irq(unsigned int bits_wide)
return -1;
}
+
+unsigned long txn_affinity_addr(unsigned int irq, int cpu)
+{
+#ifdef CONFIG_SMP
+ irq_affinity[irq] = cpumask_of_cpu(cpu);
+#endif
+
+ return cpu_data[cpu].txn_addr;
+}
+
+
unsigned long txn_alloc_addr(unsigned int virt_irq)
{
static int next_cpu = -1;
@@ -233,7 +286,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
if (next_cpu >= NR_CPUS)
next_cpu = 0; /* nothing else, assign monarch */
- return cpu_data[next_cpu].txn_addr;
+ return txn_affinity_addr(virt_irq, next_cpu);
}
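txn_alloc_addr() and txn_affinity_addr() are the glue between affinity and the hardware: the returned value is the per-CPU transaction address an external interrupt controller programs into its routing register, and txn_affinity_addr() keeps irq_affinity[] in step with whichever CPU was chosen. A sketch of how a chip-level ->set_affinity could use them; the function and the routing-register write are hypothetical, not code from this patch:

/*
 * Hypothetical caller: an external interrupt chip retargeting one of
 * its interrupts.  cpu_check_affinity() enforces the one-CPU policy,
 * txn_affinity_addr() records the choice in irq_affinity[] and returns
 * the transaction address the chip must deliver to from now on.
 */
static void sketch_chip_set_affinity(unsigned int irq, cpumask_t dest)
{
	unsigned long txn_addr;
	int cpu;

	if (cpu_check_affinity(irq, &dest))
		return;			/* timer/IPI: must stay on all CPUs */

	cpu = first_cpu(dest);
	txn_addr = txn_affinity_addr(irq, cpu);

	/* hardware specific and omitted from this sketch: write txn_addr
	 * (and the IRQ's transaction data) into the chip's routing register */
	(void)txn_addr;
}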
@@ -250,10 +303,11 @@ void do_cpu_irq_mask(struct pt_regs *regs)
irq_enter();
/*
- * Only allow interrupt processing to be interrupted by the
- * timer tick
+ * Don't allow TIMER or IPI nested interrupts.
+ * Allowing any single interrupt to nest can lead to that CPU
+ * handling interrupts with all enabled interrupts unmasked.
*/
- set_eiem(EIEM_MASK(TIMER_IRQ));
+ set_eiem(0UL);
/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
* 2) We loop here on EIRR contents in order to avoid
@@ -267,23 +321,41 @@ void do_cpu_irq_mask(struct pt_regs *regs)
if (!eirr_val)
break;
- if (eirr_val & EIEM_MASK(TIMER_IRQ))
- set_eiem(0);
-
mtctl(eirr_val, 23); /* reset bits we are going to process */
/* Work our way from MSb to LSb...same order we alloc EIRs */
for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
+#ifdef CONFIG_SMP
+ cpumask_t dest = irq_affinity[irq];
+#endif
if (!(bit & eirr_val))
continue;
/* clear bit in mask - can exit loop sooner */
eirr_val &= ~bit;
+#ifdef CONFIG_SMP
+ /* FIXME: because generic set affinity mucks
+ * with the affinity before sending it to us,
+ * we can get the situation where the affinity is
+ * wrong for our CPU type interrupts */
+ if (irq != TIMER_IRQ && irq != IPI_IRQ &&
+ !cpu_isset(smp_processor_id(), dest)) {
+ int cpu = first_cpu(dest);
+
+ printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
+ irq, smp_processor_id(), cpu);
+ gsc_writel(irq + CPU_IRQ_BASE,
+ cpu_data[cpu].hpa);
+ continue;
+ }
+#endif
+
__do_IRQ(irq, regs);
}
}
- set_eiem(cpu_eiem);
+
+ set_eiem(cpu_eiem); /* restore original mask */
irq_exit();
}
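The redirect above leans on the PA-RISC external-interrupt delivery mechanism: writing an EIR value to a processor's HPA sets the corresponding bit in that processor's EIRR, which is also how IPIs are delivered. In other words, the gsc_writel() re-raises the interrupt on the CPU named in irq_affinity[], and this CPU simply drops it. As a standalone sketch (helper name illustrative, body mirrors the code above):

/*
 * Illustrative helper: hand an interrupt we received on the wrong CPU
 * to the CPU it is bound to, by writing into that CPU's EIR space.
 */
static void sketch_forward_irq(unsigned int irq, int cpu)
{
	gsc_writel(irq + CPU_IRQ_BASE, cpu_data[cpu].hpa);
}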
@@ -291,12 +363,14 @@ void do_cpu_irq_mask(struct pt_regs *regs)
static struct irqaction timer_action = {
.handler = timer_interrupt,
.name = "timer",
+ .flags = SA_INTERRUPT,
};
#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
.handler = ipi_interrupt,
.name = "IPI",
+ .flags = SA_INTERRUPT,
};
#endif
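SA_INTERRUPT is what keeps the generic dispatcher from re-enabling local interrupts around the timer and IPI handlers, which would defeat the no-nesting policy do_cpu_irq_mask() now enforces with set_eiem(0UL). A simplified sketch of the relevant check in the 2.6-era handle_IRQ_event() (paraphrased, not verbatim):

/*
 * Simplified: the generic per-IRQ handler loop.  Without SA_INTERRUPT
 * in action->flags, handlers run with local interrupts re-enabled.
 */
int sketch_handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
			    struct irqaction *action)
{
	int retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();	/* skipped for timer/IPI after this patch */

	do {
		retval |= action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);

	local_irq_disable();
	return retval;
}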