Diffstat (limited to 'arch/mips')
-rw-r--r-- | arch/mips/include/asm/atomic.h      | 55
-rw-r--r-- | arch/mips/include/asm/cmpxchg.h     | 22
-rw-r--r-- | arch/mips/include/asm/irq.h         |  1
-rw-r--r-- | arch/mips/include/asm/kvm_host.h    |  9
-rw-r--r-- | arch/mips/kernel/asm-offsets.c      |  1
-rw-r--r-- | arch/mips/kernel/cmpxchg.c          |  4
-rw-r--r-- | arch/mips/kernel/kprobes.c          |  3
-rw-r--r-- | arch/mips/kernel/process.c          |  2
-rw-r--r-- | arch/mips/kernel/smp.c              |  1
-rw-r--r-- | arch/mips/kvm/Makefile              |  2
-rw-r--r-- | arch/mips/kvm/mips.c                | 90
-rw-r--r-- | arch/mips/lantiq/xway/dma.c         |  1
-rw-r--r-- | arch/mips/pci/pci-rt3883.c          |  1
-rw-r--r-- | arch/mips/pci/pci-xtalk-bridge.c    |  1
-rw-r--r-- | arch/mips/sgi-ip27/ip27-irq.c       |  1
-rw-r--r-- | arch/mips/sgi-ip30/ip30-irq.c       |  1
16 files changed, 106 insertions, 89 deletions
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 27ad76791539..95e1f7f3597f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -25,24 +25,25 @@
 #include <asm/war.h>
 
 #define ATOMIC_OPS(pfx, type) \
-static __always_inline type pfx##_read(const pfx##_t *v) \
+static __always_inline type arch_##pfx##_read(const pfx##_t *v) \
 { \
 	return READ_ONCE(v->counter); \
 } \
 	\
-static __always_inline void pfx##_set(pfx##_t *v, type i) \
+static __always_inline void arch_##pfx##_set(pfx##_t *v, type i) \
 { \
 	WRITE_ONCE(v->counter, i); \
 } \
 	\
-static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n) \
+static __always_inline type \
+arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n) \
 { \
-	return cmpxchg(&v->counter, o, n); \
+	return arch_cmpxchg(&v->counter, o, n); \
 } \
 	\
-static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
+static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n) \
 { \
-	return xchg(&v->counter, n); \
+	return arch_xchg(&v->counter, n); \
 }
 
 ATOMIC_OPS(atomic, int)
@@ -53,7 +54,7 @@ ATOMIC_OPS(atomic64, s64)
 #endif
 
 #define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ void pfx##_##op(type i, pfx##_t * v) \
+static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v) \
 { \
 	type temp; \
 	\
@@ -80,7 +81,8 @@ static __inline__ void pfx##_##op(type i, pfx##_t * v) \
 }
 
 #define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
+static __inline__ type \
+arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
 { \
 	type temp, result; \
 	\
@@ -113,7 +115,8 @@ static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
 }
 
 #define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
+static __inline__ type \
+arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
 { \
 	int temp, result; \
 	\
@@ -153,18 +156,18 @@ static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
 ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
 ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
 
-#define atomic_add_return_relaxed	atomic_add_return_relaxed
-#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
 
 #ifdef CONFIG_64BIT
 ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
 ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
-# define atomic64_add_return_relaxed	atomic64_add_return_relaxed
-# define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
-# define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
-# define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
+# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
 #endif /* CONFIG_64BIT */
 
 #undef ATOMIC_OPS
@@ -176,17 +179,17 @@ ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
 ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
 ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
 
-#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed	atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
 
 #ifdef CONFIG_64BIT
 ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
 ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
 ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
-# define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
-# define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
-# define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
+# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
+# define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
+# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
 #endif
 
 #undef ATOMIC_OPS
@@ -203,7 +206,7 @@ ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
  * The function returns the old value of @v minus @i.
  */
 #define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
-static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
+static __inline__ int arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
 { \
 	type temp, result; \
 	\
@@ -255,11 +258,11 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
 }
 
 ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
-#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
+#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)
 
 #ifdef CONFIG_64BIT
 ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
-#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
+#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
 #endif
 
 #undef ATOMIC_SIP_OP
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index ed8f3f3c4304..0b983800f48b 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -90,7 +90,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
 	}
 }
 
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
 ({ \
 	__typeof__(*(ptr)) __res; \
 	\
@@ -175,14 +175,14 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	}
 }
 
-#define cmpxchg_local(ptr, old, new) \
+#define arch_cmpxchg_local(ptr, old, new) \
 	((__typeof__(*(ptr))) \
 		__cmpxchg((ptr), \
 			  (unsigned long)(__typeof__(*(ptr)))(old), \
 			  (unsigned long)(__typeof__(*(ptr)))(new), \
 			  sizeof(*(ptr))))
 
-#define cmpxchg(ptr, old, new) \
+#define arch_cmpxchg(ptr, old, new) \
 ({ \
 	__typeof__(*(ptr)) __res; \
 	\
@@ -194,7 +194,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	if (__SYNC_loongson3_war == 0) \
 		smp_mb__before_llsc(); \
 	\
-	__res = cmpxchg_local((ptr), (old), (new)); \
+	__res = arch_cmpxchg_local((ptr), (old), (new)); \
 	\
 	/* \
 	 * In the Loongson3 workaround case __cmpxchg_asm() already \
@@ -208,21 +208,21 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 })
 
 #ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
 ({ \
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-	cmpxchg_local((ptr), (o), (n)); \
+	arch_cmpxchg_local((ptr), (o), (n)); \
 })
 
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
 ({ \
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-	cmpxchg((ptr), (o), (n)); \
+	arch_cmpxchg((ptr), (o), (n)); \
 })
 #else
 
 # include <asm-generic/cmpxchg-local.h>
-# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+# define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
 
 # ifdef CONFIG_SMP
@@ -294,7 +294,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 	return ret;
 }
 
-# define cmpxchg64(ptr, o, n) ({ \
+# define arch_cmpxchg64(ptr, o, n) ({ \
 	unsigned long long __old = (__typeof__(*(ptr)))(o); \
 	unsigned long long __new = (__typeof__(*(ptr)))(n); \
 	__typeof__(*(ptr)) __res; \
@@ -317,7 +317,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 })
 
 # else /* !CONFIG_SMP */
-# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+# define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
 # endif /* !CONFIG_SMP */
 #endif /* !CONFIG_64BIT */
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index f021de661c3a..d1477ecb1af9 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -11,7 +11,6 @@
 #include <linux/linkage.h>
 #include <linux/smp.h>
-#include <linux/irqdomain.h>
 
 #include <asm/mipsmtregs.h>
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index fca4547d580f..696f6b009377 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -109,10 +109,11 @@ static inline bool kvm_is_error_hva(unsigned long addr)
 }
 
 struct kvm_vm_stat {
-	ulong remote_tlb_flush;
+	struct kvm_vm_stat_generic generic;
 };
 
 struct kvm_vcpu_stat {
+	struct kvm_vcpu_stat_generic generic;
 	u64 wait_exits;
 	u64 cache_exits;
 	u64 signal_exits;
@@ -142,12 +143,6 @@ struct kvm_vcpu_stat {
 #ifdef CONFIG_CPU_LOONGSON64
 	u64 vz_cpucfg_exits;
 #endif
-	u64 halt_successful_poll;
-	u64 halt_attempted_poll;
-	u64 halt_poll_success_ns;
-	u64 halt_poll_fail_ns;
-	u64 halt_poll_invalid;
-	u64 halt_wakeup;
 };
 
 struct kvm_arch_memory_slot {
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 5735b2cd6f2a..04ca75278f02 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -78,7 +78,6 @@ void output_ptreg_defines(void)
 void output_task_defines(void)
 {
 	COMMENT("MIPS task_struct offsets.");
-	OFFSET(TASK_STATE, task_struct, state);
 	OFFSET(TASK_THREAD_INFO, task_struct, stack);
 	OFFSET(TASK_FLAGS, task_struct, flags);
 	OFFSET(TASK_MM, task_struct, mm);
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 89107deb03fc..ac9c8cfb2ba9 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -41,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
 	do {
 		old32 = load32;
 		new32 = (load32 & ~mask) | (val << shift);
-		load32 = cmpxchg(ptr32, old32, new32);
+		load32 = arch_cmpxchg(ptr32, old32, new32);
 	} while (load32 != old32);
 
 	return (load32 & mask) >> shift;
@@ -97,7 +97,7 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
 		 */
 		old32 = (load32 & ~mask) | (old << shift);
 		new32 = (load32 & ~mask) | (new << shift);
-		load32 = cmpxchg(ptr32, old32, new32);
+		load32 = arch_cmpxchg(ptr32, old32, new32);
 		if (load32 == old32)
 			return old;
 	}
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 54dfba8fa77c..75bff0f77319 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -403,9 +403,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
-		return 1;
-
 	if (kcb->kprobe_status & KPROBE_HIT_SS) {
 		resume_execution(cur, regs, kcb);
 		regs->cp0_status |= kcb->kprobe_old_SR;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index bff080db0294..73c8e7990a97 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -662,7 +662,7 @@ unsigned long get_wchan(struct task_struct *task)
 	unsigned long ra = 0;
 #endif
 
-	if (!task || task == current || task->state == TASK_RUNNING)
+	if (!task || task == current || task_is_running(task))
 		goto out;
 	if (!task_stack_page(task))
 		goto out;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index ef86fbad8546..d542fb7af3ba 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
 	 */
 
 	calibrate_delay();
-	preempt_disable();
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 30cc060857c7..c67250a956b8 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for KVM support for MIPS
 #
 
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o binary_stats.o)
 
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 4d4af97dcc88..af9dd029a4e1 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -38,43 +38,63 @@
 #define VECTORSPACING 0x100	/* for EI/VI mode */
 #endif
 
-struct kvm_stats_debugfs_item debugfs_entries[] = {
-	VCPU_STAT("wait", wait_exits),
-	VCPU_STAT("cache", cache_exits),
-	VCPU_STAT("signal", signal_exits),
-	VCPU_STAT("interrupt", int_exits),
-	VCPU_STAT("cop_unusable", cop_unusable_exits),
-	VCPU_STAT("tlbmod", tlbmod_exits),
-	VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
-	VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
-	VCPU_STAT("addrerr_st", addrerr_st_exits),
-	VCPU_STAT("addrerr_ld", addrerr_ld_exits),
-	VCPU_STAT("syscall", syscall_exits),
-	VCPU_STAT("resvd_inst", resvd_inst_exits),
-	VCPU_STAT("break_inst", break_inst_exits),
-	VCPU_STAT("trap_inst", trap_inst_exits),
-	VCPU_STAT("msa_fpe", msa_fpe_exits),
-	VCPU_STAT("fpe", fpe_exits),
-	VCPU_STAT("msa_disabled", msa_disabled_exits),
-	VCPU_STAT("flush_dcache", flush_dcache_exits),
-	VCPU_STAT("vz_gpsi", vz_gpsi_exits),
-	VCPU_STAT("vz_gsfc", vz_gsfc_exits),
-	VCPU_STAT("vz_hc", vz_hc_exits),
-	VCPU_STAT("vz_grr", vz_grr_exits),
-	VCPU_STAT("vz_gva", vz_gva_exits),
-	VCPU_STAT("vz_ghfc", vz_ghfc_exits),
-	VCPU_STAT("vz_gpa", vz_gpa_exits),
-	VCPU_STAT("vz_resvd", vz_resvd_exits),
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+	KVM_GENERIC_VM_STATS()
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+		sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+	.name_size = KVM_STATS_NAME_SIZE,
+	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+	.id_offset = sizeof(struct kvm_stats_header),
+	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+		       sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+	KVM_GENERIC_VCPU_STATS(),
+	STATS_DESC_COUNTER(VCPU, wait_exits),
+	STATS_DESC_COUNTER(VCPU, cache_exits),
+	STATS_DESC_COUNTER(VCPU, signal_exits),
+	STATS_DESC_COUNTER(VCPU, int_exits),
+	STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
+	STATS_DESC_COUNTER(VCPU, tlbmod_exits),
+	STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
+	STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
+	STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
+	STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
+	STATS_DESC_COUNTER(VCPU, syscall_exits),
+	STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
+	STATS_DESC_COUNTER(VCPU, break_inst_exits),
+	STATS_DESC_COUNTER(VCPU, trap_inst_exits),
+	STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
+	STATS_DESC_COUNTER(VCPU, fpe_exits),
+	STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
+	STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
+	STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
+	STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
+	STATS_DESC_COUNTER(VCPU, vz_hc_exits),
+	STATS_DESC_COUNTER(VCPU, vz_grr_exits),
+	STATS_DESC_COUNTER(VCPU, vz_gva_exits),
+	STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
+	STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
+	STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
 #ifdef CONFIG_CPU_LOONGSON64
-	VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
+	STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
 #endif
-	VCPU_STAT("halt_successful_poll", halt_successful_poll),
-	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
-	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
-	VCPU_STAT("halt_wakeup", halt_wakeup),
-	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
-	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
-	{NULL}
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+		sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+	.name_size = KVM_STATS_NAME_SIZE,
+	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+	.id_offset = sizeof(struct kvm_stats_header),
+	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+		       sizeof(kvm_vcpu_stats_desc),
 };
 
 bool kvm_trace_guest_mode_change;
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index aeb1b989cd4e..63dccb2ed08b 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -12,6 +12,7 @@
 #include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/err.h>
+#include <linux/of.h>
 
 #include <lantiq_soc.h>
 #include <xway_dma.h>
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index aebd4964ea34..c48e23cf5b5e 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
index d2216942af18..ab9bedb82b28 100644
--- a/arch/mips/pci/pci-xtalk-bridge.c
+++ b/arch/mips/pci/pci-xtalk-bridge.c
@@ -13,6 +13,7 @@
 #include <linux/platform_data/xtalk-bridge.h>
 #include <linux/nvmem-consumer.h>
 #include <linux/crc16.h>
+#include <linux/irqdomain.h>
 
 #include <asm/pci/bridge.h>
 #include <asm/paccess.h>
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 42df9fafa943..95c1bff1ab9f 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -9,6 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/bitops.h>
diff --git a/arch/mips/sgi-ip30/ip30-irq.c b/arch/mips/sgi-ip30/ip30-irq.c
index e8374e4c705b..ba87704073c8 100644
--- a/arch/mips/sgi-ip30/ip30-irq.c
+++ b/arch/mips/sgi-ip30/ip30-irq.c
@@ -6,6 +6,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
 #include <linux/tick.h>
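
A note on the atomic.h/cmpxchg.h hunks above: giving the MIPS primitives arch_ prefixed names is what lets the generic instrumented atomic layer provide the unprefixed atomic_*()/cmpxchg() entry points on top of them. The sketch below only illustrates that wrapper pattern, assuming the instrument_*() helpers from <linux/instrumented.h>; the real wrappers are generated (include/asm-generic/atomic-instrumented.h at the time of this change), not hand-written like this.

/*
 * Illustrative sketch of the layering enabled by the arch_ renames:
 * the generic layer adds KASAN/KCSAN instrumentation, then calls the
 * raw MIPS ll/sc implementation renamed in the diff above.
 */
#include <linux/instrumented.h>
#include <asm/atomic.h>

static __always_inline int atomic_read(const atomic_t *v)
{
	/* record the read for the sanitizers, then do the real access */
	instrument_atomic_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline void atomic_add(int i, atomic_t *v)
{
	/* read-modify-write instrumentation, then the arch primitive */
	instrument_atomic_read_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}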
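
Similarly, the kvm/mips.c hunk replaces the debugfs_entries[] table with a self-describing binary stats layout: a kvm_stats_header, then an id string, then the descriptor array, then one u64 per descriptor (the static_asserts tie the descriptor count to the number of u64 members in the stat structs). The reader-side sketch below only mirrors the offset fields visible in the diff; the struct name, helper, and the example numbers are hypothetical, and this is not the UAPI definition.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical mirror of the offsets set up in the diff:
 *   id_offset   = sizeof(struct kvm_stats_header)
 *   desc_offset = id_offset + KVM_STATS_NAME_SIZE
 *   data_offset = desc_offset + sizeof(<descriptor array>)
 */
struct stats_header_view {
	uint32_t name_size;	/* KVM_STATS_NAME_SIZE */
	uint32_t num_desc;	/* ARRAY_SIZE(kvm_*_stats_desc) */
	uint32_t id_offset;	/* start of the id string */
	uint32_t desc_offset;	/* start of the descriptor array */
	uint32_t data_offset;	/* start of the u64 counters */
};

static void print_layout(const struct stats_header_view *h)
{
	printf("id string  : [%u, %u)\n", h->id_offset, h->desc_offset);
	printf("descriptors: [%u, %u), %u entries\n",
	       h->desc_offset, h->data_offset, h->num_desc);
	printf("data       : %u u64 values from offset %u\n",
	       h->num_desc, h->data_offset);
}

int main(void)
{
	/* Made-up example values, purely to exercise the helper. */
	struct stats_header_view h = {
		.name_size = 48, .num_desc = 3,
		.id_offset = 24, .desc_offset = 72, .data_offset = 264,
	};
	print_layout(&h);
	return 0;
}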