58 files changed, 403 insertions, 180 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 229c66b12cc2..8197fbd70a3e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5884,7 +5884,7 @@ F: drivers/i2c/busses/i2c-omap.c F: include/linux/i2c-omap.h OMAP DEVICE TREE SUPPORT -M: Benoît Cousson <b-cousson@ti.com> +M: Benoît Cousson <bcousson@baylibre.com> M: Tony Lindgren <tony@atomide.com> L: linux-omap@vger.kernel.org L: devicetree@vger.kernel.org @@ -5964,14 +5964,14 @@ S: Maintained F: drivers/char/hw_random/omap-rng.c OMAP HWMOD SUPPORT -M: Benoît Cousson <b-cousson@ti.com> +M: Benoît Cousson <bcousson@baylibre.com> M: Paul Walmsley <paul@pwsan.com> L: linux-omap@vger.kernel.org S: Maintained F: arch/arm/mach-omap2/omap_hwmod.* OMAP HWMOD DATA FOR OMAP4-BASED DEVICES -M: Benoît Cousson <b-cousson@ti.com> +M: Benoît Cousson <bcousson@baylibre.com> L: linux-omap@vger.kernel.org S: Maintained F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 11 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc6 NAME = Linux for Workgroups # *DOCUMENTATION* diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts index d59b70c6a6a0..3d77dbe406f4 100644 --- a/arch/arm/boot/dts/at91sam9n12ek.dts +++ b/arch/arm/boot/dts/at91sam9n12ek.dts @@ -14,11 +14,11 @@ compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9"; chosen { - bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; + bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; }; memory { - reg = <0x20000000 0x10000000>; + reg = <0x20000000 0x8000000>; }; clocks { diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi index b753855b2058..49e3c45818c2 100644 --- a/arch/arm/boot/dts/at91sam9x5ek.dtsi +++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi @@ -94,8 +94,9 @@ usb0: ohci@00600000 { status = "okay"; - num-ports = <2>; - atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW + num-ports = <3>; + atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */ + &pioD 19 GPIO_ACTIVE_LOW &pioD 20 GPIO_ACTIVE_LOW >; }; diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts index 365760b33a26..40e6fb280333 100644 --- a/arch/arm/boot/dts/tegra20-seaboard.dts +++ b/arch/arm/boot/dts/tegra20-seaboard.dts @@ -830,6 +830,8 @@ regulator-max-microvolt = <5000000>; enable-active-high; gpio = <&gpio 24 0>; /* PD0 */ + regulator-always-on; + regulator-boot-on; }; }; diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts index ed4b901b0227..37c93d3c4812 100644 --- a/arch/arm/boot/dts/tegra20-trimslice.dts +++ b/arch/arm/boot/dts/tegra20-trimslice.dts @@ -412,6 +412,8 @@ regulator-max-microvolt = <5000000>; enable-active-high; gpio = <&gpio 170 0>; /* PV2 */ + regulator-always-on; + regulator-boot-on; }; }; diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts index ab67c94db280..a3d0ebad78a1 100644 --- a/arch/arm/boot/dts/tegra20-whistler.dts +++ b/arch/arm/boot/dts/tegra20-whistler.dts @@ -588,6 +588,8 @@ regulator-max-microvolt = <5000000>; enable-active-high; gpio = <&tca6416 0 0>; /* GPIO_PMU0 */ + regulator-always-on; + regulator-boot-on; }; vbus3_reg: regulator@3 { @@ -598,6 +600,8 @@ regulator-max-microvolt = <5000000>; enable-active-high; gpio = <&tca6416 1 0>; /* GPIO_PMU1 */ + regulator-always-on; + regulator-boot-on; }; }; diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 4a5199070430..db9cf692d4dd 
100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu, #define access_pmintenclr pm_fake /* Architected CP15 registers. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 + * CRn denotes the primary register number, but is copied to the CRm in the + * user space API for 64-bit register access in line with the terminology used + * in the ARM ARM. + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit + * registers preceding 32-bit ones. */ static const struct coproc_reg cp15_regs[] = { /* CSSELR: swapped by interrupt.S. */ @@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = { NULL, reset_unknown, c0_CSSELR }, /* TTBR0/TTBR1: swapped by interrupt.S. */ - { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, - { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, + { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, + { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, /* TTBCR: swapped by interrupt.S. */ { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, @@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = { NULL, reset_unknown, c6_IFAR }, /* PAR swapped by interrupt.S */ - { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, + { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, /* * DC{C,I,CI}SW operations: @@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params) | KVM_REG_ARM_OPC1_MASK)) return false; params->is_64bit = true; - params->CRm = ((id & KVM_REG_ARM_CRM_MASK) + /* CRm to CRn: see cp15_to_index for details */ + params->CRn = ((id & KVM_REG_ARM_CRM_MASK) >> KVM_REG_ARM_CRM_SHIFT); params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) >> KVM_REG_ARM_OPC1_SHIFT); params->Op2 = 0; - params->CRn = 0; + params->CRm = 0; return true; default: return false; @@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg) if (reg->is_64) { val |= KVM_REG_SIZE_U64; val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); - val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); + /* + * CRn always denotes the primary coproc. reg. nr. for the + * in-kernel representation, but the user space API uses the + * CRm for the encoding, because it is modelled after the + * MRRC/MCRR instructions: see the ARM ARM rev. c page + * B3-1445 + */ + val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT); } else { val |= KVM_REG_SIZE_U32; val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index b7301d3e4799..0461d5c8d3de 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h @@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1, return -1; if (i1->CRn != i2->CRn) return i1->CRn - i2->CRn; + if (i1->is_64 != i2->is_64) + return i2->is_64 - i1->is_64; if (i1->CRm != i2->CRm) return i1->CRm - i2->CRm; if (i1->Op1 != i2->Op1) @@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1, #define CRn(_x) .CRn = _x #define CRm(_x) .CRm = _x +#define CRm64(_x) .CRn = _x, .CRm = 0 #define Op1(_x) .Op1 = _x #define Op2(_x) .Op2 = _x #define is64 .is_64 = true diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c index 685063a6d0cf..cf93472b9dd6 100644 --- a/arch/arm/kvm/coproc_a15.c +++ b/arch/arm/kvm/coproc_a15.c @@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu, /* * A15-specific CP15 registers. 
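The sort-order comment and the new is_64 comparison above decide where the CRm64()-encoded TTBR0/TTBR1 entries land relative to 32-bit registers that share CRn 2. A minimal user-space sketch of that ordering (hypothetical table entries, comparator adapted for qsort(); not the kernel table itself):

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct reg {
		int CRn, CRm, Op1, Op2;
		bool is_64;
		const char *name;
	};

	/* Same ordering as the patched cmp_reg(): CRn first, then 64-bit
	 * registers ahead of 32-bit ones, then CRm, Op1, Op2. */
	static int cmp_reg(const void *a, const void *b)
	{
		const struct reg *i1 = a, *i2 = b;

		if (i1->CRn != i2->CRn)
			return i1->CRn - i2->CRn;
		if (i1->is_64 != i2->is_64)
			return i2->is_64 - i1->is_64;
		if (i1->CRm != i2->CRm)
			return i1->CRm - i2->CRm;
		if (i1->Op1 != i2->Op1)
			return i1->Op1 - i2->Op1;
		return i1->Op2 - i2->Op2;
	}

	int main(void)
	{
		/* CRm64(2) expands to .CRn = 2, .CRm = 0, so the 64-bit TTBRx
		 * entries share CRn with the 32-bit TTBCR but must sort first. */
		struct reg regs[] = {
			{ .CRn = 2, .Op1 = 0, .Op2 = 2, .is_64 = false, .name = "TTBCR"  },
			{ .CRn = 2, .Op1 = 1,           .is_64 = true,  .name = "TTBR1"  },
			{ .CRn = 0, .Op1 = 2,           .is_64 = false, .name = "CSSELR" },
			{ .CRn = 2, .Op1 = 0,           .is_64 = true,  .name = "TTBR0"  },
		};
		size_t i;

		qsort(regs, sizeof(regs) / sizeof(regs[0]), sizeof(regs[0]), cmp_reg);
		for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
			printf("%s\n", regs[i].name);	/* CSSELR TTBR0 TTBR1 TTBCR */
		return 0;
	}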
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 + * CRn denotes the primary register number, but is copied to the CRm in the + * user space API for 64-bit register access in line with the terminology used + * in the ARM ARM. + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit + * registers preceding 32-bit ones. */ static const struct coproc_reg a15_regs[] = { /* MPIDR: we use VMPIDR for guest access. */ diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index b8e06b7a2833..0c25d9487d53 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c @@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_exit_mmio *mmio) { - unsigned long rt, len; + unsigned long rt; + int len; bool is_write, sign_extend; if (kvm_vcpu_dabt_isextabt(vcpu)) { diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index ca6bea4859b4..0988d9e04dd4 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) return p; } +static bool page_empty(void *ptr) +{ + struct page *ptr_page = virt_to_page(ptr); + return page_count(ptr_page) == 1; +} + static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) { pmd_t *pmd_table = pmd_offset(pud, 0); @@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) put_page(virt_to_page(pmd)); } -static bool pmd_empty(pmd_t *pmd) -{ - struct page *pmd_page = virt_to_page(pmd); - return page_count(pmd_page) == 1; -} - static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) { if (pte_present(*pte)) { @@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) } } -static bool pte_empty(pte_t *pte) -{ - struct page *pte_page = virt_to_page(pte); - return page_count(pte_page) == 1; -} - static void unmap_range(struct kvm *kvm, pgd_t *pgdp, unsigned long long start, u64 size) { @@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, pmd_t *pmd; pte_t *pte; unsigned long long addr = start, end = start + size; - u64 range; + u64 next; while (addr < end) { pgd = pgdp + pgd_index(addr); pud = pud_offset(pgd, addr); if (pud_none(*pud)) { - addr += PUD_SIZE; + addr = pud_addr_end(addr, end); continue; } pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { - addr += PMD_SIZE; + addr = pmd_addr_end(addr, end); continue; } pte = pte_offset_kernel(pmd, addr); clear_pte_entry(kvm, pte, addr); - range = PAGE_SIZE; + next = addr + PAGE_SIZE; /* If we emptied the pte, walk back up the ladder */ - if (pte_empty(pte)) { + if (page_empty(pte)) { clear_pmd_entry(kvm, pmd, addr); - range = PMD_SIZE; - if (pmd_empty(pmd)) { + next = pmd_addr_end(addr, end); + if (page_empty(pmd) && !page_empty(pud)) { clear_pud_entry(kvm, pud, addr); - range = PUD_SIZE; + next = pud_addr_end(addr, end); } } - addr += range; + addr = next; } } diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c index 2abee6626aac..916e5a142917 100644 --- a/arch/arm/mach-at91/at91sam9x5.c +++ b/arch/arm/mach-at91/at91sam9x5.c @@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk), CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk), CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk), + CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk), + 
CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk), CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk), CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk), CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk), diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index dff4ddc5ef81..139e42da25f0 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c @@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = { .parts = davinci_nand_partitions, .nr_parts = ARRAY_SIZE(davinci_nand_partitions), .ecc_mode = NAND_ECC_HW_SYNDROME, + .ecc_bits = 4, .bbt_options = NAND_BBT_USE_FLASH, }; diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index a33686a6fbb2..fa4bfaf952d8 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = { .parts = davinci_evm_nandflash_partition, .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), .ecc_mode = NAND_ECC_HW, + .ecc_bits = 1, .bbt_options = NAND_BBT_USE_FLASH, .timing = &davinci_evm_nandflash_timing, }; diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index fbb8e5ab1dc1..0c005e876cac 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = { .parts = davinci_nand_partitions, .nr_parts = ARRAY_SIZE(davinci_nand_partitions), .ecc_mode = NAND_ECC_HW, + .ecc_bits = 1, .options = 0, }; diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index 2bc112adf565..808233b60e3d 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c @@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = { .parts = davinci_ntosd2_nandflash_partition, .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), .ecc_mode = NAND_ECC_HW, + .ecc_bits = 1, .bbt_options = NAND_BBT_USE_FLASH, }; diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index f6eeb87e4e95..827d15009a86 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c @@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = { }; static struct musb_hdrc_platform_data tusb_data = { -#ifdef CONFIG_USB_GADGET_MUSB_HDRC .mode = MUSB_OTG, -#else - .mode = MUSB_HOST, -#endif .set_power = tusb_set_power, .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */ .power = 100, /* Max 100 mA VBUS for host mode */ diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c index d2ea68ea678a..7735105561d8 100644 --- a/arch/arm/mach-omap2/board-rx51.c +++ b/arch/arm/mach-omap2/board-rx51.c @@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = { static struct omap_musb_board_data musb_board_data = { .interface_type = MUSB_INTERFACE_ULPI, - .mode = MUSB_PERIPHERAL, + .mode = MUSB_OTG, .power = 0, }; diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index 8c4de2708cf2..bc897231bd10 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c @@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = { }; static struct musb_hdrc_platform_data musb_plat = { -#ifdef CONFIG_USB_GADGET_MUSB_HDRC .mode = 
MUSB_OTG, -#else - .mode = MUSB_HOST, -#endif + /* .clock is set dynamically */ .config = &musb_config, diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c index 3e5c4619caa5..50a3ea0037db 100644 --- a/arch/arm/plat-samsung/init.c +++ b/arch/arm/plat-samsung/init.c @@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode, printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); - if (cpu->map_io == NULL || cpu->init == NULL) { + if (cpu->init == NULL) { printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); panic("Unsupported Samsung CPU"); } - cpu->map_io(); + if (cpu->map_io) + cpu->map_io(); } /* s3c24xx_init_clocks diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index c9770ba5c7df..8a6295c86209 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused) per_cpu(xen_vcpu, cpu) = vcpup; enable_percpu_irq(xen_events_irq, 0); + put_cpu(); } static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index c92de4163eba..b25763bc0ec4 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -42,14 +42,15 @@ #define TPIDR_EL1 18 /* Thread ID, Privileged */ #define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ +#define PAR_EL1 21 /* Physical Address Register */ /* 32bit specific registers. Keep them at the end of the range */ -#define DACR32_EL2 21 /* Domain Access Control Register */ -#define IFSR32_EL2 22 /* Instruction Fault Status Register */ -#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */ -#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */ -#define TEECR32_EL1 25 /* ThumbEE Configuration Register */ -#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */ -#define NR_SYS_REGS 27 +#define DACR32_EL2 22 /* Domain Access Control Register */ +#define IFSR32_EL2 23 /* Instruction Fault Status Register */ +#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */ +#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */ +#define TEECR32_EL1 26 /* ThumbEE Configuration Register */ +#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */ +#define NR_SYS_REGS 28 /* 32bit mapping */ #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ @@ -69,6 +70,8 @@ #define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ #define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ #define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ +#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */ +#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */ #define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ #define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ #define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 644d73956864..0859a4ddd1e7 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -129,7 +129,7 @@ struct kvm_vcpu_arch { struct kvm_mmu_memory_cache mmu_page_cache; /* Target CPU and feature flags */ - u32 target; + int target; DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); /* Detect first run of a vcpu */ diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 9ba33c40cdf8..12e6ccb88691 100644 --- 
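The plat-samsung/init.c change above makes cpu->map_io optional while keeping cpu->init mandatory. A tiny user-space sketch of that optional-callback pattern (hypothetical names, not the Samsung code):

	#include <stdio.h>
	#include <stdlib.h>

	struct cpu_table {
		const char *name;
		void (*map_io)(void);	/* optional */
		void (*init)(void);	/* required */
	};

	static void generic_init(void) { printf("init\n"); }

	static void init_cpu(const struct cpu_table *cpu)
	{
		if (cpu->init == NULL) {
			fprintf(stderr, "CPU %s support not enabled\n", cpu->name);
			exit(1);
		}
		if (cpu->map_io)	/* only call it when the SoC provides one */
			cpu->map_io();
		cpu->init();
	}

	int main(void)
	{
		struct cpu_table dt_cpu = { .name = "s3c-dt", .init = generic_init };

		init_cpu(&dt_cpu);	/* no map_io: skipped instead of panicking */
		return 0;
	}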
a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map) static int armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) { - int mapping = (*event_map)[config]; + int mapping; + + if (config >= PERF_COUNT_HW_MAX) + return -EINVAL; + + mapping = (*event_map)[config]; return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; } @@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events, struct hw_perf_event fake_event = event->hw; struct pmu *leader_pmu = event->group_leader->pmu; + if (is_software_event(event)) + return 1; + if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) return 1; diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index ff985e3d8b72..1ac0bbbdddb2 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -214,6 +214,7 @@ __kvm_hyp_code_start: mrs x21, tpidr_el1 mrs x22, amair_el1 mrs x23, cntkctl_el1 + mrs x24, par_el1 stp x4, x5, [x3] stp x6, x7, [x3, #16] @@ -225,6 +226,7 @@ __kvm_hyp_code_start: stp x18, x19, [x3, #112] stp x20, x21, [x3, #128] stp x22, x23, [x3, #144] + str x24, [x3, #160] .endm .macro restore_sysregs @@ -243,6 +245,7 @@ __kvm_hyp_code_start: ldp x18, x19, [x3, #112] ldp x20, x21, [x3, #128] ldp x22, x23, [x3, #144] + ldr x24, [x3, #160] msr vmpidr_el2, x4 msr csselr_el1, x5 @@ -264,6 +267,7 @@ __kvm_hyp_code_start: msr tpidr_el1, x21 msr amair_el1, x22 msr cntkctl_el1, x23 + msr par_el1, x24 .endm .macro skip_32bit_state tmp, target @@ -600,6 +604,8 @@ END(__kvm_vcpu_run) // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); ENTRY(__kvm_tlb_flush_vmid_ipa) + dsb ishst + kern_hyp_va x0 ldr x2, [x0, #KVM_VTTBR] msr vttbr_el2, x2 @@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa) ENDPROC(__kvm_tlb_flush_vmid_ipa) ENTRY(__kvm_flush_vm_context) + dsb ishst tlbi alle1is ic ialluis dsb sy @@ -753,6 +760,10 @@ el1_trap: */ tbnz x1, #7, 1f // S1PTW is set + /* Preserve PAR_EL1 */ + mrs x3, par_el1 + push x3, xzr + /* * Permission fault, HPFAR_EL2 is invalid. * Resolve the IPA the hard way using the guest VA. 
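The armpmu_map_event() change above rejects out-of-range config values before they index the event map (the same hunk also lets software events pass validate_event()). A user-space sketch of that validate-before-index check, with placeholder table contents and sizes:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define HW_OP_UNSUPPORTED	0xffff	/* placeholder sentinel */
	#define PERF_COUNT_HW_MAX	4	/* illustrative table size */

	static const unsigned event_map[PERF_COUNT_HW_MAX] = {
		0x11, 0x08, HW_OP_UNSUPPORTED, 0x10,
	};

	static int map_event(uint64_t config)
	{
		unsigned mapping;

		if (config >= PERF_COUNT_HW_MAX)	/* validate before indexing */
			return -EINVAL;

		mapping = event_map[config];
		return mapping == HW_OP_UNSUPPORTED ? -ENOENT : (int)mapping;
	}

	int main(void)
	{
		printf("%d %d %d\n", map_event(1), map_event(2), map_event(99));
		return 0;
	}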
@@ -766,6 +777,8 @@ el1_trap: /* Read result */ mrs x3, par_el1 + pop x0, xzr // Restore PAR_EL1 from the stack + msr par_el1, x0 tbnz x3, #0, 3f // Bail out if we failed the translation ubfx x3, x3, #12, #36 // Extract IPA lsl x3, x3, #4 // and present it like HPFAR diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 94923609753b..02e9d09e1d80 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* FAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), NULL, reset_unknown, FAR_EL1 }, + /* PAR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), + NULL, reset_unknown, PAR_EL1 }, /* PMINTENSET_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index e773659ccf9f..46048d24328c 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; return 1; break; +#ifdef CONFIG_CPU_CAVIUM_OCTEON + case lwc2_op: /* This is bbit0 on Octeon */ + if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; + case ldc2_op: /* This is bbit032 on Octeon */ + if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0) + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; + case swc2_op: /* This is bbit1 on Octeon */ + if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; + case sdc2_op: /* This is bbit132 on Octeon */ + if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; +#endif case cop0_op: case cop1_op: case cop2_op: diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index 653668d140f9..4a8cb8d7cbd5 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h @@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params) */ if (boot_params->sentinel) { /* fields in boot_params are left uninitialized, clear them */ - memset(&boot_params->olpc_ofw_header, 0, + memset(&boot_params->ext_ramdisk_image, 0, (char *)&boot_params->efi_info - - (char *)&boot_params->olpc_ofw_header); + (char *)&boot_params->ext_ramdisk_image); memset(&boot_params->kbd_status, 0, (char *)&boot_params->hdr - (char *)&boot_params->kbd_status); diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 50e5c58ced23..4c019179a57d 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table, extern int __apply_microcode_amd(struct microcode_amd *mc_amd); extern int apply_microcode_amd(int cpu); -extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size); +extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); #ifdef CONFIG_MICROCODE_AMD_EARLY #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/amd.c 
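For the Octeon bbit emulation added in the cp1emu.c hunk above, the taken-branch target is the delay-slot address plus the sign-extended 16-bit immediate shifted left by two, and the not-taken path skips the delay slot. A small sketch of that address arithmetic (sample values only):

	#include <stdint.h>
	#include <stdio.h>

	/* Taken:     epc + 4 + (simm << 2)
	 * Not taken: epc + 8 (skip the delay slot) */
	static uint64_t bbit_target(uint64_t epc, int16_t simm, int taken)
	{
		return taken ? epc + 4 + ((int64_t)simm << 2) : epc + 8;
	}

	int main(void)
	{
		uint64_t epc = 0xffffffff80001000ull;

		printf("taken:     %#llx\n",
		       (unsigned long long)bbit_target(epc, -2, 1)); /* ...80000ffc */
		printf("not taken: %#llx\n",
		       (unsigned long long)bbit_target(epc, -2, 0)); /* ...80001008 */
		return 0;
	}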
b/arch/x86/kernel/cpu/amd.c index f654ecefea5b..08a089043ccf 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) static const int amd_erratum_383[]; static const int amd_erratum_400[]; -static bool cpu_has_amd_erratum(const int *erratum); +static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); static void init_amd(struct cpuinfo_x86 *c) { @@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c) value &= ~(1ULL << 24); wrmsrl_safe(MSR_AMD64_BU_CFG2, value); - if (cpu_has_amd_erratum(amd_erratum_383)) + if (cpu_has_amd_erratum(c, amd_erratum_383)) set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); } - if (cpu_has_amd_erratum(amd_erratum_400)) + if (cpu_has_amd_erratum(c, amd_erratum_400)) set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); @@ -878,23 +878,13 @@ static const int amd_erratum_400[] = static const int amd_erratum_383[] = AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -static bool cpu_has_amd_erratum(const int *erratum) + +static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { - struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); int osvw_id = *erratum++; u32 range; u32 ms; - /* - * If called early enough that current_cpu_data hasn't been initialized - * yet, fall back to boot_cpu_data. - */ - if (cpu->x86 == 0) - cpu = &boot_cpu_data; - - if (cpu->x86_vendor != X86_VENDOR_AMD) - return false; - if (osvw_id >= 0 && osvw_id < 65536 && cpu_has(cpu, X86_FEATURE_OSVW)) { u64 osvw_len; diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 7a0adb7ee433..7123b5df479d 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) return 0; } -static unsigned int verify_patch_size(int cpu, u32 patch_size, +static unsigned int verify_patch_size(u8 family, u32 patch_size, unsigned int size) { - struct cpuinfo_x86 *c = &cpu_data(cpu); u32 max_size; #define F1XH_MPB_MAX_SIZE 2048 @@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, #define F15H_MPB_MAX_SIZE 4096 #define F16H_MPB_MAX_SIZE 3458 - switch (c->x86) { + switch (family) { case 0x14: max_size = F14H_MPB_MAX_SIZE; break; @@ -277,9 +276,8 @@ static void cleanup(void) * driver cannot continue functioning normally. In such cases, we tear * down everything we've used up so far and exit. 
*/ -static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) +static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) { - struct cpuinfo_x86 *c = &cpu_data(cpu); struct microcode_header_amd *mc_hdr; struct ucode_patch *patch; unsigned int patch_size, crnt_size, ret; @@ -299,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) /* check if patch is for the current family */ proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); - if (proc_fam != c->x86) + if (proc_fam != family) return crnt_size; if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { @@ -308,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) return crnt_size; } - ret = verify_patch_size(cpu, patch_size, leftover); + ret = verify_patch_size(family, patch_size, leftover); if (!ret) { pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); return crnt_size; @@ -339,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) return crnt_size; } -static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size) +static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, + size_t size) { enum ucode_state ret = UCODE_ERROR; unsigned int leftover; @@ -362,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz } while (leftover) { - crnt_size = verify_and_add_patch(cpu, fw, leftover); + crnt_size = verify_and_add_patch(family, fw, leftover); if (crnt_size < 0) return ret; @@ -373,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz return UCODE_OK; } -enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) +enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) { enum ucode_state ret; /* free old equiv table */ free_equiv_cpu_table(); - ret = __load_microcode_amd(cpu, data, size); + ret = __load_microcode_amd(family, data, size); if (ret != UCODE_OK) cleanup(); #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) /* save BSP's matching patch for early load */ - if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { - struct ucode_patch *p = find_patch(cpu); + if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { + struct ucode_patch *p = find_patch(smp_processor_id()); if (p) { memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), @@ -441,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, goto fw_release; } - ret = load_microcode_amd(cpu, fw->data, fw->size); + ret = load_microcode_amd(c->x86, fw->data, fw->size); fw_release: release_firmware(fw); diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c index 1d14ffee5749..6073104ccaa3 100644 --- a/arch/x86/kernel/microcode_amd_early.c +++ b/arch/x86/kernel/microcode_amd_early.c @@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg) uci->cpu_sig.sig = cpuid_eax(0x00000001); } #else -static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, - struct ucode_cpu_info *uci) +void load_ucode_amd_ap(void) { + unsigned int cpu = smp_processor_id(); + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; u32 rev, eax; rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); eax = cpuid_eax(0x00000001); - uci->cpu_sig.sig = eax; uci->cpu_sig.rev = rev; - c->microcode = rev; - c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); -} - -void 
load_ucode_amd_ap(void) -{ - unsigned int cpu = smp_processor_id(); - - collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu); + uci->cpu_sig.sig = eax; if (cpu && !ucode_loaded) { void *ucode; @@ -265,8 +257,10 @@ void load_ucode_amd_ap(void) return; ucode = (void *)(initrd_start + ucode_offset); - if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK) + eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); + if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK) return; + ucode_loaded = true; } @@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void) { enum ucode_state ret; void *ucode; + u32 eax; + #ifdef CONFIG_X86_32 unsigned int bsp = boot_cpu_data.cpu_index; struct ucode_cpu_info *uci = ucode_cpu_info + bsp; @@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void) return 0; ucode = (void *)(initrd_start + ucode_offset); - ret = load_microcode_amd(0, ucode, ucode_size); + eax = cpuid_eax(0x00000001); + eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); + + ret = load_microcode_amd(eax, ucode, ucode_size); if (ret != UCODE_OK) return -EINVAL; diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 48f8375e4c6b..30277e27431a 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, *begin = new_begin; } } else { - *begin = mmap_legacy_base(); + *begin = current->mm->mmap_legacy_base; *end = TASK_SIZE; } } diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index f63778cb2363..25e7e1372bb2 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -98,7 +98,7 @@ static unsigned long mmap_base(void) * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 * does, but not when emulating X86_32 */ -unsigned long mmap_legacy_base(void) +static unsigned long mmap_legacy_base(void) { if (mmap_is_ia32()) return TASK_UNMAPPED_BASE; @@ -112,11 +112,13 @@ unsigned long mmap_legacy_base(void) */ void arch_pick_mmap_layout(struct mm_struct *mm) { + mm->mmap_legacy_base = mmap_legacy_base(); + mm->mmap_base = mmap_base(); + if (mmap_is_legacy()) { - mm->mmap_base = mmap_legacy_base(); + mm->mmap_base = mm->mmap_legacy_base; mm->get_unmapped_area = arch_get_unmapped_area; } else { - mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 056d11faef21..8f3eea6b80c5 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type) e820_add_region(start, end - start, type); } +void xen_ignore_unusable(struct e820entry *list, size_t map_size) +{ + struct e820entry *entry; + unsigned int i; + + for (i = 0, entry = list; i < map_size; i++, entry++) { + if (entry->type == E820_UNUSABLE) + entry->type = E820_RAM; + } +} + /** * machine_specific_memory_setup - Hook for machine specific memory setup. **/ @@ -353,6 +364,17 @@ char * __init xen_memory_setup(void) } BUG_ON(rc); + /* + * Xen won't allow a 1:1 mapping to be created to UNUSABLE + * regions, so if we're using the machine memory map leave the + * region as RAM as it is in the pseudo-physical map. + * + * UNUSABLE regions in domUs are not handled and will need + * a patch in the future. + */ + if (xen_initial_domain()) + xen_ignore_unusable(map, memmap.nr_entries); + /* Make sure the Xen-supplied memory map is well-ordered. 
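Several of the microcode changes above replace a per-cpu lookup with the family computed directly from CPUID leaf 1: base family from bits 11:8 plus extended family from bits 27:20 (the AMD parts handled here report base family 0xf, so the unconditional addition works out). A worked example with an illustrative EAX value:

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t amd_family(uint32_t eax)
	{
		return ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
	}

	int main(void)
	{
		/* e.g. EAX = 0x00600f12: base family 0xf, extended family 0x06 */
		uint32_t eax = 0x00600f12;

		printf("family 0x%x\n", amd_family(eax));	/* 0xf + 0x6 = 0x15 */
		return 0;
	}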
*/ sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index ca92754eb846..b81c88e51daa 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) { int rc; - rc = native_cpu_up(cpu, tidle); - WARN_ON (xen_smp_intr_init(cpu)); + /* + * xen_smp_intr_init() needs to run before native_cpu_up() + * so that IPI vectors are set up on the booting CPU before + * it is marked online in native_cpu_up(). + */ + rc = xen_smp_intr_init(cpu); + WARN_ON(rc); + if (!rc) + rc = native_cpu_up(cpu, tidle); return rc; } diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index dc53a527126b..9e6578330801 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *sg, enum dma_data_direction dir) { + struct drm_i915_gem_object *obj = attachment->dmabuf->priv; + + mutex_lock(&obj->base.dev->struct_mutex); + dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); sg_free_table(sg); kfree(sg); + + i915_gem_object_unpin_pages(obj); + + mutex_unlock(&obj->base.dev->struct_mutex); } static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e38b45786653..be79f477a38f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -10042,6 +10042,8 @@ struct intel_display_error_state { u32 power_well_driver; + int num_transcoders; + struct intel_cursor_error_state { u32 control; u32 position; @@ -10050,16 +10052,7 @@ struct intel_display_error_state { } cursor[I915_MAX_PIPES]; struct intel_pipe_error_state { - enum transcoder cpu_transcoder; - u32 conf; u32 source; - - u32 htotal; - u32 hblank; - u32 hsync; - u32 vtotal; - u32 vblank; - u32 vsync; } pipe[I915_MAX_PIPES]; struct intel_plane_error_state { @@ -10071,6 +10064,19 @@ struct intel_display_error_state { u32 surface; u32 tile_offset; } plane[I915_MAX_PIPES]; + + struct intel_transcoder_error_state { + enum transcoder cpu_transcoder; + + u32 conf; + + u32 htotal; + u32 hblank; + u32 hsync; + u32 vtotal; + u32 vblank; + u32 vsync; + } transcoder[4]; }; struct intel_display_error_state * @@ -10078,9 +10084,17 @@ intel_display_capture_error_state(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_display_error_state *error; - enum transcoder cpu_transcoder; + int transcoders[] = { + TRANSCODER_A, + TRANSCODER_B, + TRANSCODER_C, + TRANSCODER_EDP, + }; int i; + if (INTEL_INFO(dev)->num_pipes == 0) + return NULL; + error = kmalloc(sizeof(*error), GFP_ATOMIC); if (error == NULL) return NULL; @@ -10089,9 +10103,6 @@ intel_display_capture_error_state(struct drm_device *dev) error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); for_each_pipe(i) { - cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); - error->pipe[i].cpu_transcoder = cpu_transcoder; - if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { error->cursor[i].control = I915_READ(CURCNTR(i)); error->cursor[i].position = I915_READ(CURPOS(i)); @@ -10115,14 +10126,25 @@ intel_display_capture_error_state(struct drm_device *dev) error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); } - error->pipe[i].conf = 
I915_READ(PIPECONF(cpu_transcoder)); error->pipe[i].source = I915_READ(PIPESRC(i)); - error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); - error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); - error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); - error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); - error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); - error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); + } + + error->num_transcoders = INTEL_INFO(dev)->num_pipes; + if (HAS_DDI(dev_priv->dev)) + error->num_transcoders++; /* Account for eDP. */ + + for (i = 0; i < error->num_transcoders; i++) { + enum transcoder cpu_transcoder = transcoders[i]; + + error->transcoder[i].cpu_transcoder = cpu_transcoder; + + error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); + error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); + error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); + error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); + error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); + error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); + error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); } /* In the code above we read the registers without checking if the power @@ -10144,22 +10166,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, { int i; + if (!error) + return; + err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); if (HAS_POWER_WELL(dev)) err_printf(m, "PWR_WELL_CTL2: %08x\n", error->power_well_driver); for_each_pipe(i) { err_printf(m, "Pipe [%d]:\n", i); - err_printf(m, " CPU transcoder: %c\n", - transcoder_name(error->pipe[i].cpu_transcoder)); - err_printf(m, " CONF: %08x\n", error->pipe[i].conf); err_printf(m, " SRC: %08x\n", error->pipe[i].source); - err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); - err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); - err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); - err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); - err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); - err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); err_printf(m, "Plane [%d]:\n", i); err_printf(m, " CNTR: %08x\n", error->plane[i].control); @@ -10180,5 +10196,17 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " POS: %08x\n", error->cursor[i].position); err_printf(m, " BASE: %08x\n", error->cursor[i].base); } + + for (i = 0; i < error->num_transcoders; i++) { + err_printf(m, " CPU transcoder: %c\n", + transcoder_name(error->transcoder[i].cpu_transcoder)); + err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); + err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); + err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); + err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); + err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); + err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); + err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); + } } #endif diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 274b8e1b889f..9f19259667df 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2163,7 +2163,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); WREG32(reg, tmp_); \ } while (0) #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) -#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or) +#define WREG32_OR(reg, or) WREG32_P(reg, or, 
~(or)) #define WREG32_PLL_P(reg, val, mask) \ do { \ uint32_t tmp_ = RREG32_PLL(reg); \ diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index f1c15754e73c..b79f4f5cdd62 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -356,6 +356,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } + if (bo->tbo.sync_obj) { + r = radeon_fence_wait(bo->tbo.sync_obj, false); + if (r) { + DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); + return r; + } + } + r = radeon_bo_kmap(bo, &ptr); if (r) { DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index bcc68ec204ad..f5e92cfcc140 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); radeon_program_register_sequence(rdev, rv730_golden_registers, - (const u32)ARRAY_SIZE(rv770_golden_registers)); + (const u32)ARRAY_SIZE(rv730_golden_registers)); radeon_program_register_sequence(rdev, rv730_mgcg_init, - (const u32)ARRAY_SIZE(rv770_mgcg_init)); + (const u32)ARRAY_SIZE(rv730_mgcg_init)); break; case CHIP_RV710: radeon_program_register_sequence(rdev, @@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); radeon_program_register_sequence(rdev, rv710_golden_registers, - (const u32)ARRAY_SIZE(rv770_golden_registers)); + (const u32)ARRAY_SIZE(rv710_golden_registers)); radeon_program_register_sequence(rdev, rv710_mgcg_init, - (const u32)ARRAY_SIZE(rv770_mgcg_init)); + (const u32)ARRAY_SIZE(rv710_mgcg_init)); break; case CHIP_RV740: radeon_program_register_sequence(rdev, rv740_golden_registers, - (const u32)ARRAY_SIZE(rv770_golden_registers)); + (const u32)ARRAY_SIZE(rv740_golden_registers)); radeon_program_register_sequence(rdev, rv740_mgcg_init, - (const u32)ARRAY_SIZE(rv770_mgcg_init)); + (const u32)ARRAY_SIZE(rv740_mgcg_init)); break; default: break; diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index dc112a7137fe..4296155090b2 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -959,23 +959,21 @@ out: return r; } -static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) +static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) { - struct entry *e = hash_lookup(mq, oblock); + struct mq_policy *mq = to_mq_policy(p); + struct entry *e; + + mutex_lock(&mq->lock); + + e = hash_lookup(mq, oblock); BUG_ON(!e || !e->in_cache); del(mq, e); e->in_cache = false; push(mq, e); -} -static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) -{ - struct mq_policy *mq = to_mq_policy(p); - - mutex_lock(&mq->lock); - remove_mapping(mq, oblock); mutex_unlock(&mq->lock); } diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 6bb7cf2de556..b10ba00cc3e6 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob, mem = (unsigned long) dt_alloc(size + 4, __alignof__(struct device_node)); + memset((void *)mem, 0, size); + ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); pr_debug(" unflattening %lx...\n", mem); diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c index 
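The one-character radeon.h fix above (WREG32_OR now builds its mask as ~(or)) is the usual macro-argument parenthesization rule: without the parentheses, an argument such as A | B binds to ~ incorrectly after textual substitution. A stand-alone illustration with hypothetical flag values:

	#include <stdio.h>

	#define FLAG_A	0x01u
	#define FLAG_B	0x10u

	#define INVERT_BAD(or)	(~or)	/* expands to (~FLAG_A | FLAG_B) */
	#define INVERT_OK(or)	(~(or))	/* expands to (~(FLAG_A | FLAG_B)) */

	int main(void)
	{
		printf("bad: %#x\n", INVERT_BAD(FLAG_A | FLAG_B));	/* 0xfffffffe */
		printf("ok:  %#x\n", INVERT_OK(FLAG_A | FLAG_B));	/* 0xffffffee */
		return 0;
	}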
c47fd1e5450b..94716c779800 100644 --- a/drivers/pinctrl/pinctrl-sunxi.c +++ b/drivers/pinctrl/pinctrl-sunxi.c @@ -278,6 +278,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); struct sunxi_pinctrl_group *g = &pctl->groups[group]; + unsigned long flags; u32 val, mask; u16 strength; u8 dlevel; @@ -295,22 +296,35 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, * 3: 40mA */ dlevel = strength / 10 - 1; + + spin_lock_irqsave(&pctl->lock, flags); + val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), pctl->membase + sunxi_dlevel_reg(g->pin)); + + spin_unlock_irqrestore(&pctl->lock, flags); break; case PIN_CONFIG_BIAS_PULL_UP: + spin_lock_irqsave(&pctl->lock, flags); + val = readl(pctl->membase + sunxi_pull_reg(g->pin)); mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), pctl->membase + sunxi_pull_reg(g->pin)); + + spin_unlock_irqrestore(&pctl->lock, flags); break; case PIN_CONFIG_BIAS_PULL_DOWN: + spin_lock_irqsave(&pctl->lock, flags); + val = readl(pctl->membase + sunxi_pull_reg(g->pin)); mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), pctl->membase + sunxi_pull_reg(g->pin)); + + spin_unlock_irqrestore(&pctl->lock, flags); break; default: break; @@ -360,11 +374,17 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev, u8 config) { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + unsigned long flags; + u32 val, mask; + + spin_lock_irqsave(&pctl->lock, flags); - u32 val = readl(pctl->membase + sunxi_mux_reg(pin)); - u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin); + val = readl(pctl->membase + sunxi_mux_reg(pin)); + mask = MUX_PINS_MASK << sunxi_mux_offset(pin); writel((val & ~mask) | config << sunxi_mux_offset(pin), pctl->membase + sunxi_mux_reg(pin)); + + spin_unlock_irqrestore(&pctl->lock, flags); } static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, @@ -464,8 +484,21 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip, struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); u32 reg = sunxi_data_reg(offset); u8 index = sunxi_data_offset(offset); + unsigned long flags; + u32 regval; + + spin_lock_irqsave(&pctl->lock, flags); + + regval = readl(pctl->membase + reg); - writel((value & DATA_PINS_MASK) << index, pctl->membase + reg); + if (value) + regval |= BIT(index); + else + regval &= ~(BIT(index)); + + writel(regval, pctl->membase + reg); + + spin_unlock_irqrestore(&pctl->lock, flags); } static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, @@ -526,6 +559,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); u32 reg = sunxi_irq_cfg_reg(d->hwirq); u8 index = sunxi_irq_cfg_offset(d->hwirq); + unsigned long flags; + u32 regval; u8 mode; switch (type) { @@ -548,7 +583,13 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, return -EINVAL; } - writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg); + spin_lock_irqsave(&pctl->lock, flags); + + regval = readl(pctl->membase + reg); + regval &= ~IRQ_CFG_IRQ_MASK; + writel(regval | (mode << index), pctl->membase + reg); + + spin_unlock_irqrestore(&pctl->lock, flags); return 0; } @@ -560,14 +601,19 @@ static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d) u8 ctrl_idx = 
sunxi_irq_ctrl_offset(d->hwirq); u32 status_reg = sunxi_irq_status_reg(d->hwirq); u8 status_idx = sunxi_irq_status_offset(d->hwirq); + unsigned long flags; u32 val; + spin_lock_irqsave(&pctl->lock, flags); + /* Mask the IRQ */ val = readl(pctl->membase + ctrl_reg); writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); /* Clear the IRQ */ writel(1 << status_idx, pctl->membase + status_reg); + + spin_unlock_irqrestore(&pctl->lock, flags); } static void sunxi_pinctrl_irq_mask(struct irq_data *d) @@ -575,11 +621,16 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d) struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); u32 reg = sunxi_irq_ctrl_reg(d->hwirq); u8 idx = sunxi_irq_ctrl_offset(d->hwirq); + unsigned long flags; u32 val; + spin_lock_irqsave(&pctl->lock, flags); + /* Mask the IRQ */ val = readl(pctl->membase + reg); writel(val & ~(1 << idx), pctl->membase + reg); + + spin_unlock_irqrestore(&pctl->lock, flags); } static void sunxi_pinctrl_irq_unmask(struct irq_data *d) @@ -588,6 +639,7 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d) struct sunxi_desc_function *func; u32 reg = sunxi_irq_ctrl_reg(d->hwirq); u8 idx = sunxi_irq_ctrl_offset(d->hwirq); + unsigned long flags; u32 val; func = sunxi_pinctrl_desc_find_function_by_pin(pctl, @@ -597,9 +649,13 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d) /* Change muxing to INT mode */ sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); + spin_lock_irqsave(&pctl->lock, flags); + /* Unmask the IRQ */ val = readl(pctl->membase + reg); writel(val | (1 << idx), pctl->membase + reg); + + spin_unlock_irqrestore(&pctl->lock, flags); } static struct irq_chip sunxi_pinctrl_irq_chip = { @@ -752,6 +808,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev) return -ENOMEM; platform_set_drvdata(pdev, pctl); + spin_lock_init(&pctl->lock); + pctl->membase = of_iomap(node, 0); if (!pctl->membase) return -ENOMEM; diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h index d68047d8f699..01c494f8a14f 100644 --- a/drivers/pinctrl/pinctrl-sunxi.h +++ b/drivers/pinctrl/pinctrl-sunxi.h @@ -14,6 +14,7 @@ #define __PINCTRL_SUNXI_H #include <linux/kernel.h> +#include <linux/spinlock.h> #define PA_BASE 0 #define PB_BASE 32 @@ -407,6 +408,7 @@ struct sunxi_pinctrl { unsigned ngroups; int irq; int irq_array[SUNXI_IRQ_NUMBER]; + spinlock_t lock; struct pinctrl_dev *pctl_dev; }; diff --git a/drivers/xen/events.c b/drivers/xen/events.c index a58ac435a9a4..5e8be462aed5 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void) for_each_possible_cpu(i) memset(per_cpu(cpu_evtchn_mask, i), - (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); + (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8); } static inline void clear_evtchn(int port) @@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq) /* Rebind an evtchn so that it gets delivered to a specific cpu */ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) { + struct shared_info *s = HYPERVISOR_shared_info; struct evtchn_bind_vcpu bind_vcpu; int evtchn = evtchn_from_irq(irq); + int masked; if (!VALID_EVTCHN(evtchn)) return -1; @@ -1511,6 +1513,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) bind_vcpu.vcpu = tcpu; /* + * Mask the event while changing the VCPU binding to prevent + * it being delivered on an unexpected VCPU. 
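The pinctrl-sunxi changes in this region wrap every read-modify-write of a shared register in a spinlock, and the GPIO set path now does a locked read-modify-write instead of writing a shifted value directly, since several pins (and the IRQ paths) share one 32-bit register. A user-space sketch of the same pattern with a pthread mutex standing in for the spinlock (hypothetical register model, not the driver itself):

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t reg;		/* shared "hardware" register */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* The whole read-modify-write must be atomic with respect to other
	 * users, otherwise a concurrent writer's bits can be lost. */
	static void set_pin(unsigned index, int value)
	{
		uint32_t val;

		pthread_mutex_lock(&lock);
		val = reg;
		if (value)
			val |= 1u << index;
		else
			val &= ~(1u << index);
		reg = val;
		pthread_mutex_unlock(&lock);
	}

	static void *worker(void *arg)
	{
		unsigned bit = (unsigned)(uintptr_t)arg;

		for (int i = 0; i < 100000; i++)
			set_pin(bit, i & 1);
		return NULL;
	}

	int main(void)	/* build with -pthread */
	{
		pthread_t a, b;

		pthread_create(&a, NULL, worker, (void *)0UL);
		pthread_create(&b, NULL, worker, (void *)1UL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		set_pin(0, 1);
		set_pin(1, 1);
		printf("reg = %#x\n", (unsigned)reg);	/* deterministic 0x3 */
		return 0;
	}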
+ */ + masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask)); + + /* * If this fails, it usually just indicates that we're dealing with a * virq or IPI channel, which don't actually need to be rebound. Ignore * it, but don't do the xenlinux-level rebind in that case. @@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu); + if (!masked) + unmask_evtchn(evtchn); + return 0; } diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 9435384562a2..544a809819c3 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1838,14 +1838,14 @@ int __init gfs2_glock_init(void) glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0); - if (IS_ERR(glock_workqueue)) - return PTR_ERR(glock_workqueue); + if (!glock_workqueue) + return -ENOMEM; gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); - if (IS_ERR(gfs2_delete_workqueue)) { + if (!gfs2_delete_workqueue) { destroy_workqueue(glock_workqueue); - return PTR_ERR(gfs2_delete_workqueue); + return -ENOMEM; } register_shrinker(&glock_shrinker); diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 5f2e5224c51c..e2e0a90396e7 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -47,7 +47,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) * None of the buffers should be dirty, locked, or pinned. */ -static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) +static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, + unsigned int nr_revokes) { struct gfs2_sbd *sdp = gl->gl_sbd; struct list_head *head = &gl->gl_ail_list; @@ -57,7 +58,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) gfs2_log_lock(sdp); spin_lock(&sdp->sd_ail_lock); - list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) { + list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) { + if (nr_revokes == 0) + break; bh = bd->bd_bh; if (bh->b_state & b_state) { if (fsync) @@ -65,6 +68,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) gfs2_ail_error(gl, bh); } gfs2_trans_add_revoke(sdp, bd); + nr_revokes--; } GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); spin_unlock(&sdp->sd_ail_lock); @@ -91,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) WARN_ON_ONCE(current->journal_info); current->journal_info = &tr; - __gfs2_ail_flush(gl, 0); + __gfs2_ail_flush(gl, 0, tr.tr_revokes); gfs2_trans_end(sdp); gfs2_log_flush(sdp, NULL); @@ -101,15 +105,19 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) { struct gfs2_sbd *sdp = gl->gl_sbd; unsigned int revokes = atomic_read(&gl->gl_ail_count); + unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64); int ret; if (!revokes) return; - ret = gfs2_trans_begin(sdp, 0, revokes); + while (revokes > max_revokes) + max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64); + + ret = gfs2_trans_begin(sdp, 0, max_revokes); if (ret) return; - __gfs2_ail_flush(gl, fsync); + __gfs2_ail_flush(gl, fsync, max_revokes); gfs2_trans_end(sdp); gfs2_log_flush(sdp, NULL); } diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index bbb2715171cd..64915eeae5a7 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -594,7 +594,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, } gfs2_glock_dq_uninit(ghs); if (IS_ERR(d)) - return PTR_RET(d); + return PTR_ERR(d); 
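The drivers/xen/events.c change earlier in this region masks the event channel across the bind_vcpu hypercall and unmasks it afterwards only if it was not already masked, so a channel its user had masked stays masked. A small sketch of that save-and-restore-the-mask-bit pattern (plain bit operations standing in for the sync_ helpers and the hypercall):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t evtchn_mask;	/* one mask bit per event channel */

	static int test_and_set_bit(int nr, uint64_t *word)
	{
		int old = (*word >> nr) & 1;

		*word |= 1ull << nr;
		return old;
	}

	static void clear_bit(int nr, uint64_t *word)
	{
		*word &= ~(1ull << nr);
	}

	static void rebind_to_cpu(int evtchn)
	{
		int masked = test_and_set_bit(evtchn, &evtchn_mask);

		/* ...EVTCHNOP_bind_vcpu would run here, delivery blocked... */

		if (!masked)	/* only undo a mask we set ourselves */
			clear_bit(evtchn, &evtchn_mask);
	}

	int main(void)
	{
		evtchn_mask = 1ull << 3;	/* channel 3 masked by its user */
		rebind_to_cpu(3);
		rebind_to_cpu(5);
		printf("mask = %#llx\n", (unsigned long long)evtchn_mask); /* 0x8 */
		return 0;
	}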
return error; } else if (error != -ENOENT) { goto fail_gunlock; @@ -1750,6 +1750,10 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name, struct gfs2_holder gh; int ret; + /* For selinux during lookup */ + if (gfs2_glock_is_locked_by_me(ip->i_gl)) + return generic_getxattr(dentry, name, data, size); + gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); ret = gfs2_glock_nq(&gh); if (ret == 0) { diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index e04d0e09ee7b..7b0f5043cf24 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -155,7 +155,7 @@ static int __init init_gfs2_fs(void) goto fail_wq; gfs2_control_wq = alloc_workqueue("gfs2_control", - WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0); + WQ_UNBOUND | WQ_FREEZABLE, 0); if (!gfs2_control_wq) goto fail_recovery; diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 94441a407337..737e15615b04 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -271,7 +271,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file, de = next; } while (de); spin_unlock(&proc_subdir_lock); - return 0; + return 1; } int proc_readdir(struct file *file, struct dir_context *ctx) diff --git a/fs/proc/root.c b/fs/proc/root.c index 229e366598da..e0a790da726d 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -205,7 +205,9 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr static int proc_root_readdir(struct file *file, struct dir_context *ctx) { if (ctx->pos < FIRST_PROCESS_ENTRY) { - proc_readdir(file, ctx); + int error = proc_readdir(file, ctx); + if (unlikely(error <= 0)) + return error; ctx->pos = FIRST_PROCESS_ENTRY; } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fb425aa16c01..faf4b7c1ad12 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -332,6 +332,7 @@ struct mm_struct { unsigned long pgoff, unsigned long flags); #endif unsigned long mmap_base; /* base of mmap area */ + unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ unsigned long task_size; /* size of task vm space */ unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; diff --git a/include/linux/sched.h b/include/linux/sched.h index e9995eb5985c..078066daffd4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -314,7 +314,6 @@ struct nsproxy; struct user_namespace; #ifdef CONFIG_MMU -extern unsigned long mmap_legacy_base(void); extern void arch_pick_mmap_layout(struct mm_struct *mm); extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index a326f27d7f09..0b479a6a22bb 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) BUG_ON(bits > 32); WARN_ON(!irqs_disabled()); read_sched_clock = read; - sched_clock_mask = (1 << bits) - 1; + sched_clock_mask = (1ULL << bits) - 1; cd.rate = rate; /* calculate the mult/shift to convert counter ticks to ns. */ diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index e77edc97e036..e8a1516cc0a3 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -182,7 +182,8 @@ static bool can_stop_full_tick(void) * Don't allow the user to think they can get * full NO_HZ with this machine. 
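The kernel/time/sched_clock.c fix above matters once a clocksource uses all 32 bits: shifting a plain int by 32 is undefined, while the 1ULL form yields the intended 0xffffffff mask (the function already has BUG_ON(bits > 32)). A short check of the widened form, with an illustrative return type:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t clock_mask(int bits)
	{
		/* 1ULL keeps the shift in 64-bit arithmetic; (1 << 32) on a
		 * 32-bit int would be undefined behaviour. */
		return (1ull << bits) - 1;
	}

	int main(void)
	{
		printf("24 bits: %#llx\n", (unsigned long long)clock_mask(24));
		printf("32 bits: %#llx\n", (unsigned long long)clock_mask(32));
		return 0;
	}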
*/ - WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock"); + WARN_ONCE(have_nohz_full_mask, + "NO_HZ FULL will not work with unstable sched clock"); return false; } #endif @@ -343,8 +344,6 @@ static int tick_nohz_init_all(void) void __init tick_nohz_init(void) { - int cpu; - if (!have_nohz_full_mask) { if (tick_nohz_init_all() < 0) return; diff --git a/kernel/wait.c b/kernel/wait.c index dec68bd4e9d8..d550920e040c 100644 --- a/kernel/wait.c +++ b/kernel/wait.c @@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t); /** * wake_up_atomic_t - Wake up a waiter on a atomic_t - * @word: The word being waited on, a kernel virtual address - * @bit: The bit of the word being waited on + * @p: The atomic_t being waited on, a kernel virtual address * * Wake up anyone waiting for the atomic_t to go to zero. * |