From 8bf1268f48ad9bf5d6401b4db913e6d85b0863f6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 10 Mar 2015 16:41:35 +0000 Subject: ARM: dma-api: fix off-by-one error in __dma_supported() When validating the mask against the amount of memory we have available (so that we can trap 32-bit DMA addresses with >32-bits memory), we had not taken account of the fact that max_pfn is the maximum PFN number plus one that would be in the system. There are several references in the code which bear this out: mm/page_owner.c: for (; pfn < max_pfn; pfn++) { } arch/x86/kernel/setup.c: high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) Signed-off-by: Russell King --- arch/arm/mm/dma-mapping.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 170a116d1b29..c27447653903 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -171,7 +171,7 @@ static int __dma_supported(struct device *dev, u64 mask, bool warn) */ if (sizeof(mask) != sizeof(dma_addr_t) && mask > (dma_addr_t)~0 && - dma_to_pfn(dev, ~0) < max_pfn) { + dma_to_pfn(dev, ~0) < max_pfn - 1) { if (warn) { dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", mask); -- cgit v1.2.3 From 6d021b724481fbb908eb29384898deb9f00dfe70 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 10 Mar 2015 19:40:55 +0000 Subject: ARM: dump pgd, pmd and pte states on unhandled data abort faults It can be useful to dump the page table entries when an unhandled data abort fault occurs. This can aid debugging of these situations, for example, a STREX instruction causing an external abort on non-linefetch fault, as has been reported recently. Signed-off-by: Russell King --- arch/arm/mm/fault.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index a982dc3190df..6333d9c17875 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -552,6 +552,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", inf->name, fsr, addr); + show_pte(current->mm, addr); info.si_signo = inf->sig; info.si_errno = 0; -- cgit v1.2.3 From 5c95ed47f1777e9e9b1eb29e48f34e9af3139f29 Mon Sep 17 00:00:00 2001 From: Fabrice Gasnier Date: Thu, 12 Mar 2015 14:04:42 +0100 Subject: ARM: 8310/1: l2c: Fix prefetch settings dt parsing Allow prefetch settings overriding by device tree, in case l2x0_cache_size_of_parse() returns value, prefetch tuning properties are silently ignored. E.g. arm,double-linefill* and arm,prefetch*. This happens for example, when "cache-size" or "cache-sets" properties haven't been filled in l2c dt node. Comments from Fabrice Gasnier: Allow device tree to override the L2C prefetch settings, even when l2x0_cache_size_of_parse() fails to parse the cache geometry due to (eg) missing "cache-size" or "cache-sets" properties. 
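To make the shape of this fix easier to follow, here is a minimal, self-contained C sketch of the reworked control flow; the helper names (parse_cache_geometry, apply_associativity, parse_prefetch_properties) are purely illustrative stand-ins, not the real kernel functions. The point is that a failed geometry parse no longer ends OF parsing early, so only the associativity tuning depends on it and the prefetch properties are still read:

	#include <stdio.h>

	/* Illustrative stand-ins for the real parsers (assumptions, not kernel code). */
	static int parse_cache_geometry(int have_geometry)  { return have_geometry ? 0 : -1; }
	static void apply_associativity(void)                { puts("associativity applied"); }
	static void parse_prefetch_properties(void)          { puts("prefetch properties parsed"); }

	int main(void)
	{
		int ret = parse_cache_geometry(0);	/* e.g. "cache-size" missing */

		/* Old flow: an "if (ret) return;" here silently skipped the prefetch parse.
		 * New flow: only the associativity switch is gated on the geometry result. */
		if (!ret)
			apply_associativity();

		parse_prefetch_properties();		/* now always reached */
		return 0;
	}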
Signed-off-by: Fabrice Gasnier Reviewed-by: Tomasz Figa Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c6c7696b8db9..8f15f70622a6 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -1131,23 +1131,22 @@ static void __init l2c310_of_parse(const struct device_node *np, } ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); - if (ret) - return; - - switch (assoc) { - case 16: - *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; - *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; - *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; - break; - case 8: - *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; - *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; - break; - default: - pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", - assoc); - break; + if (!ret) { + switch (assoc) { + case 16: + *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; + *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; + *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; + break; + case 8: + *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; + *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; + break; + default: + pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", + assoc); + break; + } } prefetch = l2x0_saved_regs.prefetch_ctrl; -- cgit v1.2.3 From f2ca09f381a59e1eddb89aa70207740c2ee0fe94 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Fri, 13 Mar 2015 21:41:45 +0100 Subject: ARM: 8311/1: Don't use is_module_addr in setting page attributes The set_memory_* functions currently only support module addresses. The addresses are validated using is_module_addr. That function is special though and relies on internal state in the module subsystem to work properly. At the time of module initialization and calling set_memory_*, it's too early for is_module_addr to work properly so it always returns false. Rather than be subject to the whims of the module state, just bounds check against the module virtual address range. Signed-off-by: Laura Abbott Signed-off-by: Russell King --- arch/arm/mm/pageattr.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c index 004e35cdcfff..cf30daff8932 100644 --- a/arch/arm/mm/pageattr.c +++ b/arch/arm/mm/pageattr.c @@ -49,7 +49,10 @@ static int change_memory_common(unsigned long addr, int numpages, WARN_ON_ONCE(1); } - if (!is_module_address(start) || !is_module_address(end - 1)) + if (start < MODULES_VADDR || start >= MODULES_END) + return -EINVAL; + + if (end < MODULES_VADDR || start >= MODULES_END) return -EINVAL; data.set_mask = set_mask; -- cgit v1.2.3 From 526299ce4eab2e35ba733b03771d112147676b12 Mon Sep 17 00:00:00 2001 From: Mason Date: Tue, 17 Mar 2015 21:37:25 +0100 Subject: ARM: 8313/1: Use read_cpuid_ext() macro instead of inline asm Replace inline asm statement in __get_cpu_architecture() with equivalent macro invocation, i.e. read_cpuid_ext(CPUID_EXT_MMFR0); As an added bonus, this squashes a potential bug, described by Paul Walmsley in commit 067e710b9a98 ("ARM: 7801/1: prevent gcc 4.5 from reordering extended CP15 reads above is_smp() test"). 
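For reference, read_cpuid_ext() is, in kernels of this vintage, a thin wrapper around the same MRC instruction, which is why the replacement is functionally equivalent. The following is quoted approximately from memory (arch/arm/include/asm/cputype.h), so treat it as a sketch rather than the authoritative definition:

	#define CPUID_EXT_MMFR0	"c1, 4"

	#define read_cpuid_ext(ext_reg)				\
		({						\
			unsigned int __val;			\
			asm("mrc	p15, 0, %0, c0, " ext_reg	\
			    : "=r" (__val)			\
			    :					\
			    : "cc");				\
			__val;					\
		})

	/* so read_cpuid_ext(CPUID_EXT_MMFR0) emits the same
	 * "mrc p15, 0, <reg>, c0, c1, 4" as the removed inline asm. */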
Signed-off-by: Marc Gonzalez Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index e55408e96559..1d60bebea4b8 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -246,12 +246,9 @@ static int __get_cpu_architecture(void) if (cpu_arch) cpu_arch += CPU_ARCH_ARMv3; } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { - unsigned int mmfr0; - /* Revised CPUID format. Read the Memory Model Feature * Register 0 and check for VMSAv7 or PMSAv7 */ - asm("mrc p15, 0, %0, c0, c1, 4" - : "=r" (mmfr0)); + unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0); if ((mmfr0 & 0x0000000f) >= 0x00000003 || (mmfr0 & 0x000000f0) >= 0x00000030) cpu_arch = CPU_ARCH_ARMv7; -- cgit v1.2.3 From 5b0d4b5514bbcce69b516d0742f2cfc84ebd6db3 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 19 Mar 2015 16:05:57 -0400 Subject: sparc: perf: Remove redundant perf_pmu_{en|dis}able calls perf_pmu_disable is called by core perf code before pmu->del and the enable function is called by core perf code afterwards. No need to call again within sparc_pmu_del. Ditto for pmu->add and sparc_pmu_add. Signed-off-by: David Ahern Acked-by: Bob Picco Signed-off-by: David S. Miller --- arch/sparc/kernel/perf_event.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 46a5e4508752..6dc4e793df4c 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1101,7 +1101,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) int i; local_irq_save(flags); - perf_pmu_disable(event->pmu); for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event[i]) { @@ -1127,7 +1126,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) } } - perf_pmu_enable(event->pmu); local_irq_restore(flags); } @@ -1361,7 +1359,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags) unsigned long flags; local_irq_save(flags); - perf_pmu_disable(event->pmu); n0 = cpuc->n_events; if (n0 >= sparc_pmu->max_hw_events) @@ -1394,7 +1391,6 @@ nocheck: ret = 0; out: - perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; } -- cgit v1.2.3 From d51291cb8f32bfae6b331e1838651f3ddefa73a5 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 19 Mar 2015 16:06:17 -0400 Subject: sparc: perf: Make counting mode actually work Currently perf-stat (aka, counting mode) does not work: $ perf stat ls ... Performance counter stats for 'ls': 1.585665 task-clock (msec) # 0.580 CPUs utilized 24 context-switches # 0.015 M/sec 0 cpu-migrations # 0.000 K/sec 86 page-faults # 0.054 M/sec cycles stalled-cycles-frontend stalled-cycles-backend instructions branches branch-misses 0.002735100 seconds time elapsed The reason is that state is never reset (stays with PERF_HES_UPTODATE set). Add a call to sparc_pmu_enable_event during the added_event handling. Clean up the encoding since pmu_start calls sparc_pmu_enable_event which does the same. Passing PERF_EF_RELOAD to sparc_pmu_start means the call to sparc_perf_event_set_period can be removed as well. With this patch: $ perf stat ls ... 
Performance counter stats for 'ls': 1.552890 task-clock (msec) # 0.552 CPUs utilized 24 context-switches # 0.015 M/sec 0 cpu-migrations # 0.000 K/sec 86 page-faults # 0.055 M/sec 5,748,997 cycles # 3.702 GHz stalled-cycles-frontend:HG stalled-cycles-backend:HG 1,684,362 instructions:HG # 0.29 insns per cycle 295,133 branches:HG # 190.054 M/sec 28,007 branch-misses:HG # 9.49% of all branches 0.002815665 seconds time elapsed Signed-off-by: David Ahern Acked-by: Bob Picco Signed-off-by: David S. Miller --- arch/sparc/kernel/perf_event.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 6dc4e793df4c..af53c25da2e7 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -960,6 +960,8 @@ out: cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; } +static void sparc_pmu_start(struct perf_event *event, int flags); + /* On this PMU each PIC has it's own PCR control register. */ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) { @@ -972,20 +974,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) struct perf_event *cp = cpuc->event[i]; struct hw_perf_event *hwc = &cp->hw; int idx = hwc->idx; - u64 enc; if (cpuc->current_idx[i] != PIC_NO_INDEX) continue; - sparc_perf_event_set_period(cp, hwc, idx); cpuc->current_idx[i] = idx; - enc = perf_event_get_enc(cpuc->events[i]); - cpuc->pcr[idx] &= ~mask_for_index(idx); - if (hwc->state & PERF_HES_STOPPED) - cpuc->pcr[idx] |= nop_for_index(idx); - else - cpuc->pcr[idx] |= event_encoding(enc, idx); + sparc_pmu_start(cp, PERF_EF_RELOAD); } out: for (i = 0; i < cpuc->n_events; i++) { -- cgit v1.2.3 From b5aff55d89c27aedcae9521155b81b6aebb6c5d8 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 19 Mar 2015 16:06:37 -0400 Subject: sparc: perf: Add support M7 processor The M7 processor has a different hypervisor group id and different PCR fast trap values. PIC read/write functions and PCR bit fields are the same as the T4 so those are reused. Signed-off-by: David Ahern Acked-by: Bob Picco Signed-off-by: David S. Miller --- arch/sparc/include/asm/hypervisor.h | 12 +++++++++++ arch/sparc/kernel/hvapi.c | 1 + arch/sparc/kernel/hvcalls.S | 16 +++++++++++++++ arch/sparc/kernel/pcr.c | 33 ++++++++++++++++++++++++++++++ arch/sparc/kernel/perf_event.c | 40 +++++++++++++++++++++++++++++++++++++ 5 files changed, 102 insertions(+) (limited to 'arch') diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index 4f6725ff4c33..f5b6537306f0 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h @@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num, unsigned long reg_val); #endif + +#define HV_FAST_M7_GET_PERFREG 0x43 +#define HV_FAST_M7_SET_PERFREG 0x44 + +#ifndef __ASSEMBLY__ +unsigned long sun4v_m7_get_perfreg(unsigned long reg_num, + unsigned long *reg_val); +unsigned long sun4v_m7_set_perfreg(unsigned long reg_num, + unsigned long reg_val); +#endif + /* Function numbers for HV_CORE_TRAP. 
*/ #define HV_CORE_SET_VER 0x00 #define HV_CORE_PUTCHAR 0x01 @@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num, #define HV_GRP_SDIO 0x0108 #define HV_GRP_SDIO_ERR 0x0109 #define HV_GRP_REBOOT_DATA 0x0110 +#define HV_GRP_M7_PERF 0x0114 #define HV_GRP_NIAG_PERF 0x0200 #define HV_GRP_FIRE_PERF 0x0201 #define HV_GRP_N2_CPU 0x0202 diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 5c55145bfbf0..662500fa555f 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c @@ -48,6 +48,7 @@ static struct api_info api_table[] = { { .group = HV_GRP_VT_CPU, }, { .group = HV_GRP_T5_CPU, }, { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, + { .group = HV_GRP_M7_PERF, }, }; static DEFINE_SPINLOCK(hvapi_lock); diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index caedf8320416..afbaba52d2f1 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S @@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg) retl nop ENDPROC(sun4v_t5_set_perfreg) + +ENTRY(sun4v_m7_get_perfreg) + mov %o1, %o4 + mov HV_FAST_M7_GET_PERFREG, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_m7_get_perfreg) + +ENTRY(sun4v_m7_set_perfreg) + mov HV_FAST_M7_SET_PERFREG, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_m7_set_perfreg) diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 7e967c8018c8..eb978c77c76a 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = { .pcr_nmi_disable = PCR_N4_PICNPT, }; +static u64 m7_pcr_read(unsigned long reg_num) +{ + unsigned long val; + + (void) sun4v_m7_get_perfreg(reg_num, &val); + + return val; +} + +static void m7_pcr_write(unsigned long reg_num, u64 val) +{ + (void) sun4v_m7_set_perfreg(reg_num, val); +} + +static const struct pcr_ops m7_pcr_ops = { + .read_pcr = m7_pcr_read, + .write_pcr = m7_pcr_write, + .read_pic = n4_pic_read, + .write_pic = n4_pic_write, + .nmi_picl_value = n4_picl_value, + .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE | + PCR_N4_UTRACE | PCR_N4_TOE | + (26 << PCR_N4_SL_SHIFT)), + .pcr_nmi_disable = PCR_N4_PICNPT, +}; static unsigned long perf_hsvc_group; static unsigned long perf_hsvc_major; @@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void) perf_hsvc_group = HV_GRP_T5_CPU; break; + case SUN4V_CHIP_SPARC_M7: + perf_hsvc_group = HV_GRP_M7_PERF; + break; + default: return -ENODEV; } @@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void) pcr_ops = &n5_pcr_ops; break; + case SUN4V_CHIP_SPARC_M7: + pcr_ops = &m7_pcr_ops; + break; + default: ret = -ENODEV; break; diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index af53c25da2e7..86eebfa3b158 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = { .num_pic_regs = 4, }; +static void sparc_m7_write_pmc(int idx, u64 val) +{ + u64 pcr; + + pcr = pcr_ops->read_pcr(idx); + /* ensure ov and ntc are reset */ + pcr &= ~(PCR_N4_OV | PCR_N4_NTC); + + pcr_ops->write_pic(idx, val & 0xffffffff); + + pcr_ops->write_pcr(idx, pcr); +} + +static const struct sparc_pmu sparc_m7_pmu = { + .event_map = niagara4_event_map, + .cache_map = &niagara4_cache_map, + .max_events = ARRAY_SIZE(niagara4_perfmon_event_map), + .read_pmc = sparc_vt_read_pmc, + .write_pmc = sparc_m7_write_pmc, + .upper_shift = 5, + .lower_shift = 5, + .event_mask = 0x7ff, + .user_bit = PCR_N4_UTRACE, + .priv_bit = PCR_N4_STRACE, + + /* 
We explicitly don't support hypervisor tracing. */ + .hv_bit = 0, + + .irq_bit = PCR_N4_TOE, + .upper_nop = 0, + .lower_nop = 0, + .flags = 0, + .max_hw_events = 4, + .num_pcrs = 4, + .num_pic_regs = 4, +}; static const struct sparc_pmu *sparc_pmu __read_mostly; static u64 event_encoding(u64 event_id, int idx) @@ -1658,6 +1694,10 @@ static bool __init supported_pmu(void) sparc_pmu = &niagara4_pmu; return true; } + if (!strcmp(sparc_pmu_type, "sparc-m7")) { + sparc_pmu = &sparc_m7_pmu; + return true; + } return false; } -- cgit v1.2.3 From 31aaa98c248da766ece922bbbe8cc78cfd0bc920 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 19 Mar 2015 16:06:53 -0400 Subject: sparc: Touch NMI watchdog when walking cpus and calling printk With the increase in number of CPUs calls to functions that dump output to console (e.g., arch_trigger_all_cpu_backtrace) can take a long time to complete. If IRQs are disabled eventually the NMI watchdog kicks in and creates more havoc. Avoid by telling the NMI watchdog everything is ok. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- arch/sparc/kernel/process_64.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 0be7bf978cb1..46a59643bb1c 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self) printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", gp->tpc, gp->o7, gp->i7, gp->rpc); } + + touch_nmi_watchdog(); } memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); @@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void) (cpu == this_cpu ? '*' : ' '), cpu, pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); + + touch_nmi_watchdog(); } memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); -- cgit v1.2.3 From 9e8ce4b96b781b003e3174fbbc62e1d4388c8b8f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 20 Mar 2015 14:56:19 +0100 Subject: Revert "x86/PCI: Refine the way to release PCI IRQ resources" Commit b4b55cda5874 (Refine the way to release PCI IRQ resources) introduced a regression in the PCI IRQ resource management by causing the IRQ resource of a device, established when pci_enabled_device() is called on a fully disabled device, to be released when the driver is unbound from the device, regardless of the enable_cnt. This leads to the situation that an ill-behaved driver can now make a device unusable to subsequent drivers by an imbalance in their use of pci_enable/disable_device(). That is a serious problem for secondary drivers like vfio-pci, which are innocent of the transgressions of the previous driver. Since the solution of this problem is not immediate and requires further discussion, revert commit b4b55cda5874 and the issue it was supposed to address (a bug related to xen-pciback) will be taken care of in a different way going forward. Reported-by: Alex Williamson Signed-off-by: Rafael J. 
Wysocki --- arch/x86/include/asm/pci_x86.h | 2 ++ arch/x86/pci/common.c | 34 ++++++---------------------------- arch/x86/pci/intel_mid_pci.c | 4 ++-- arch/x86/pci/irq.c | 15 ++++++++++++++- 4 files changed, 24 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index fa1195dae425..164e3f8d3c3d 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock; extern int (*pcibios_enable_irq)(struct pci_dev *dev); extern void (*pcibios_disable_irq)(struct pci_dev *dev); +extern bool mp_should_keep_irq(struct device *dev); + struct pci_raw_ops { int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val); diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 3d2612b68694..2fb384724ebb 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -513,31 +513,6 @@ void __init pcibios_set_cache_line_size(void) } } -/* - * Some device drivers assume dev->irq won't change after calling - * pci_disable_device(). So delay releasing of IRQ resource to driver - * unbinding time. Otherwise it will break PM subsystem and drivers - * like xen-pciback etc. - */ -static int pci_irq_notifier(struct notifier_block *nb, unsigned long action, - void *data) -{ - struct pci_dev *dev = to_pci_dev(data); - - if (action != BUS_NOTIFY_UNBOUND_DRIVER) - return NOTIFY_DONE; - - if (pcibios_disable_irq) - pcibios_disable_irq(dev); - - return NOTIFY_OK; -} - -static struct notifier_block pci_irq_nb = { - .notifier_call = pci_irq_notifier, - .priority = INT_MIN, -}; - int __init pcibios_init(void) { if (!raw_pci_ops) { @@ -550,9 +525,6 @@ int __init pcibios_init(void) if (pci_bf_sort >= pci_force_bf) pci_sort_breadthfirst(); - - bus_register_notifier(&pci_bus_type, &pci_irq_nb); - return 0; } @@ -711,6 +683,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) return 0; } +void pcibios_disable_device (struct pci_dev *dev) +{ + if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq) + pcibios_disable_irq(dev); +} + int pci_ext_cfg_avail(void) { if (raw_pci_ext_ops) diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index efb849323c74..852aa4c92da0 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) static void intel_mid_pci_irq_disable(struct pci_dev *dev) { - if (dev->irq_managed && dev->irq > 0) { + if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed && + dev->irq > 0) { mp_unmap_irq(dev->irq); dev->irq_managed = 0; - dev->irq = 0; } } diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index e71b3dbd87b8..5dc6ca5e1741 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1256,9 +1256,22 @@ static int pirq_enable_irq(struct pci_dev *dev) return 0; } +bool mp_should_keep_irq(struct device *dev) +{ + if (dev->power.is_prepared) + return true; +#ifdef CONFIG_PM + if (dev->power.runtime_status == RPM_SUSPENDING) + return true; +#endif + + return false; +} + static void pirq_disable_irq(struct pci_dev *dev) { - if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) { + if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && + dev->irq_managed && dev->irq) { mp_unmap_irq(dev->irq); dev->irq = 0; dev->irq_managed = 0; -- cgit v1.2.3 From 130c93fd10c4d150e39d8879420c1351aa207fa9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Mar 2015 15:43:00 +0000 Subject: 
arm64: efi: don't restore TTBR0 if active_mm points at init_mm init_mm isn't a normal mm: it has swapper_pg_dir as its pgd (which contains kernel mappings) and is used as the active_mm for the idle thread. When restoring the pgd after an EFI call, we write current->active_mm into TTBR0. If the current task is actually the idle thread (e.g. when initialising the EFI RTC before entering userspace), then the TLB can erroneously populate itself with junk global entries as a result of speculative table walks. When we do eventually return to userspace, the task can end up hitting these junk mappings leading to lockups, corruption or crashes. This patch fixes the problem in the same way as the CPU suspend code by ensuring that we never switch to the init_mm in efi_set_pgd and instead point TTBR0 at the zero page. A check is also added to cpu_switch_mm to BUG if we get passed swapper_pg_dir. Reviewed-by: Ard Biesheuvel Fixes: f3cdfd239da5 ("arm64/efi: move SetVirtualAddressMap() to UEFI stub") Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/proc-fns.h | 6 +++++- arch/arm64/kernel/efi.c | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 9a8fd84f8fb2..941c375616e2 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include -#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) +#define cpu_switch_mm(pgd,mm) \ +do { \ + BUG_ON(pgd == swapper_pg_dir); \ + cpu_do_switch_mm(virt_to_phys(pgd),mm); \ +} while (0) #define cpu_get_pgd() \ ({ \ diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 2b8d70164428..ab21e0d58278 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init); static void efi_set_pgd(struct mm_struct *mm) { - cpu_switch_mm(mm->pgd, mm); + if (mm == &init_mm) + cpu_set_reserved_ttbr0(); + else + cpu_switch_mm(mm->pgd, mm); + flush_tlb_all(); if (icache_is_aivivt()) __flush_icache_all(); -- cgit v1.2.3 From 7132813c384515c9dede1ae20e56f3895feb7f1e Mon Sep 17 00:00:00 2001 From: "Suzuki K. Poulose" Date: Thu, 19 Mar 2015 18:17:09 +0000 Subject: arm64: Honor __GFP_ZERO in dma allocations Current implementation doesn't zero out the pages allocated. Honor the __GFP_ZERO flag and zero out if set. Cc: # v3.14+ Acked-by: Will Deacon Signed-off-by: Suzuki K. 
Poulose Signed-off-by: Catalin Marinas --- arch/arm64/mm/dma-mapping.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 58e0c2bdde04..ef7d112f5ce0 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p) } early_param("coherent_pool", early_coherent_pool); -static void *__alloc_from_pool(size_t size, struct page **ret_page) +static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) { unsigned long val; void *ptr = NULL; @@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) *ret_page = phys_to_page(phys); ptr = (void *)val; + if (flags & __GFP_ZERO) + memset(ptr, 0, size); } return ptr; @@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, flags |= GFP_DMA; if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { struct page *page; + void *addr; size = PAGE_ALIGN(size); page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, @@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, return NULL; *dma_handle = phys_to_dma(dev, page_to_phys(page)); - return page_address(page); + addr = page_address(page); + if (flags & __GFP_ZERO) + memset(addr, 0, size); + return addr; } else { return swiotlb_alloc_coherent(dev, size, dma_handle, flags); } @@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size, if (!coherent && !(flags & __GFP_WAIT)) { struct page *page = NULL; - void *addr = __alloc_from_pool(size, &page); + void *addr = __alloc_from_pool(size, &page, flags); if (addr) *dma_handle = phys_to_dma(dev, page_to_phys(page)); -- cgit v1.2.3 From 2077cef4d5c29cf886192ec32066f783d6a80db8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 23 Mar 2015 09:22:10 -0700 Subject: sparc64: Fix several bugs in memmove(). Firstly, handle zero length calls properly. Believe it or not there are a few of these happening during early boot. Next, we can't just drop to a memcpy() call in the forward copy case where dst <= src. The reason is that the cache initializing stores used in the Niagara memcpy() implementations can end up clearing out cache lines before we've sourced their original contents completely. For example, considering NG4memcpy, the main unrolled loop begins like this: load src + 0x00 load src + 0x08 load src + 0x10 load src + 0x18 load src + 0x20 store dst + 0x00 Assume dst is 64 byte aligned and let's say that dst is src - 8 for this memcpy() call. That store at the end there is the one to the first line in the cache line, thus clearing the whole line, which thus clobbers "src + 0x28" before it even gets loaded. To avoid this, just fall through to a simple copy only mildly optimized for the case where src and dst are 8 byte aligned and the length is a multiple of 8 as well. We could get fancy and call GENmemcpy() but this is good enough for how this thing is actually used. Reported-by: David Ahern Reported-by: Bob Picco Signed-off-by: David S. 
Miller --- arch/sparc/lib/memmove.S | 35 ++++++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S index b7f6334e159f..857ad4f8905f 100644 --- a/arch/sparc/lib/memmove.S +++ b/arch/sparc/lib/memmove.S @@ -8,9 +8,11 @@ .text ENTRY(memmove) /* o0=dst o1=src o2=len */ - mov %o0, %g1 + brz,pn %o2, 99f + mov %o0, %g1 + cmp %o0, %o1 - bleu,pt %xcc, memcpy + bleu,pt %xcc, 2f add %o1, %o2, %g7 cmp %g7, %o0 bleu,pt %xcc, memcpy @@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */ stb %g7, [%o0] bne,pt %icc, 1b sub %o0, 1, %o0 - +99: retl mov %g1, %o0 + + /* We can't just call memcpy for these memmove cases. On some + * chips the memcpy uses cache initializing stores and when dst + * and src are close enough, those can clobber the source data + * before we've loaded it in. + */ +2: or %o0, %o1, %g7 + or %o2, %g7, %g7 + andcc %g7, 0x7, %g0 + bne,pn %xcc, 4f + nop + +3: ldx [%o1], %g7 + add %o1, 8, %o1 + subcc %o2, 8, %o2 + add %o0, 8, %o0 + bne,pt %icc, 3b + stx %g7, [%o0 - 0x8] + ba,a,pt %xcc, 99b + +4: ldub [%o1], %g7 + add %o1, 1, %o1 + subcc %o2, 1, %o2 + add %o0, 1, %o0 + bne,pt %icc, 4b + stb %g7, [%o0 - 0x1] + ba,a,pt %xcc, 99b ENDPROC(memmove) -- cgit v1.2.3
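For readers who do not read sparc64 assembly, the forward-copy fallback added in the memmove() patch above behaves roughly like the following C. This is an illustrative sketch only, not the actual implementation: each chunk is fully loaded before anything is stored, and each store touches only the bytes being copied, so a source region that starts just above the destination is never clobbered the way the cache-initializing stores in NG4memcpy could clobber it.

	#include <stddef.h>
	#include <stdint.h>

	/* Rough C equivalent of the new memmove() forward-copy path (dst <= src). */
	static void *memmove_forward_sketch(void *dst, const void *src, size_t len)
	{
		void *ret = dst;

		if (!len)			/* zero-length calls are now handled */
			return ret;

		if (((uintptr_t)dst | (uintptr_t)src | len) & 0x7) {
			/* not all 8-byte aligned: plain byte-at-a-time copy */
			char *d = dst;
			const char *s = src;

			while (len--)
				*d++ = *s++;
		} else {
			/* aligned: copy 8 bytes per iteration, load before store */
			uint64_t *d = dst;
			const uint64_t *s = src;

			for (; len; len -= 8)
				*d++ = *s++;
		}
		return ret;
	}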