Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/boot/dts/mmp2-brownstone.dts | 4
-rw-r--r--  arch/arm/boot/dts/omap2.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/spear1310-evb.dts | 2
-rw-r--r--  arch/arm/boot/dts/spear1310.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/spear1340-evb.dts | 2
-rw-r--r--  arch/arm/boot/dts/spear1340.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/spear13xx.dtsi | 13
-rw-r--r--  arch/arm/boot/dts/spear300-evb.dts | 2
-rw-r--r--  arch/arm/boot/dts/spear300.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/spear310-evb.dts | 2
-rw-r--r--  arch/arm/boot/dts/spear310.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/spear320-evb.dts | 8
-rw-r--r--  arch/arm/boot/dts/spear320.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/spear3xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/spear600.dtsi | 1
-rw-r--r--  arch/arm/common/dmabounce.c | 16
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 1
-rw-r--r--  arch/arm/include/asm/atomic.h | 2
-rw-r--r--  arch/arm/include/asm/domain.h | 18
-rw-r--r--  arch/arm/include/asm/futex.h | 1
-rw-r--r--  arch/arm/include/asm/hardware/sp810.h | 2
-rw-r--r--  arch/arm/include/asm/thread_info.h | 5
-rw-r--r--  arch/arm/kernel/entry-armv.S | 1
-rw-r--r--  arch/arm/kernel/kprobes-test-arm.c | 4
-rw-r--r--  arch/arm/kernel/kprobes-thumb.c | 2
-rw-r--r--  arch/arm/kernel/perf_event.c | 2
-rw-r--r--  arch/arm/kernel/ptrace.c | 3
-rw-r--r--  arch/arm/kernel/signal.c | 46
-rw-r--r--  arch/arm/kernel/signal.h | 2
-rw-r--r--  arch/arm/kernel/traps.c | 2
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/arm/mach-dove/include/mach/bridge-regs.h | 1
-rw-r--r--  arch/arm/mach-dove/include/mach/dove.h | 1
-rw-r--r--  arch/arm/mach-exynos/Kconfig | 8
-rw-r--r--  arch/arm/mach-exynos/pm_domains.c | 13
-rw-r--r--  arch/arm/mach-highbank/Makefile | 6
-rw-r--r--  arch/arm/mach-highbank/core.h | 1
-rw-r--r--  arch/arm/mach-highbank/highbank.c | 14
-rw-r--r--  arch/arm/mach-highbank/smc.S | 27
-rw-r--r--  arch/arm/mach-imx/Kconfig | 1
-rw-r--r--  arch/arm/mach-imx/clk-imx1.c | 3
-rw-r--r--  arch/arm/mach-imx/clk-imx21.c | 4
-rw-r--r--  arch/arm/mach-imx/clk-imx25.c | 2
-rw-r--r--  arch/arm/mach-imx/clk-imx27.c | 3
-rw-r--r--  arch/arm/mach-imx/clk-imx31.c | 3
-rw-r--r--  arch/arm/mach-imx/clk-imx35.c | 15
-rw-r--r--  arch/arm/mach-imx/clk-imx51-imx53.c | 12
-rw-r--r--  arch/arm/mach-imx/clk-imx6q.c | 26
-rw-r--r--  arch/arm/mach-imx/clk-pllv2.c | 93
-rw-r--r--  arch/arm/mach-imx/crm-regs-imx5.h | 2
-rw-r--r--  arch/arm/mach-imx/hotplug.c | 42
-rw-r--r--  arch/arm/mach-imx/mach-cpuimx35.c | 1
-rw-r--r--  arch/arm/mach-imx/mach-cpuimx51sd.c | 1
-rw-r--r--  arch/arm/mach-imx/mach-imx27_visstrim_m10.c | 38
-rw-r--r--  arch/arm/mach-imx/mach-mx21ads.c | 2
-rw-r--r--  arch/arm/mach-imx/mm-imx3.c | 4
-rw-r--r--  arch/arm/mach-imx/mm-imx5.c | 2
-rw-r--r--  arch/arm/mach-kirkwood/board-iconnect.c | 3
-rw-r--r--  arch/arm/mach-kirkwood/common.c | 9
-rw-r--r--  arch/arm/mach-kirkwood/include/mach/bridge-regs.h | 1
-rw-r--r--  arch/arm/mach-kirkwood/include/mach/kirkwood.h | 1
-rw-r--r--  arch/arm/mach-mmp/include/mach/gpio-pxa.h | 29
-rw-r--r--  arch/arm/mach-mmp/irq.c | 7
-rw-r--r--  arch/arm/mach-mv78xx0/include/mach/bridge-regs.h | 1
-rw-r--r--  arch/arm/mach-mv78xx0/include/mach/mv78xx0.h | 2
-rw-r--r--  arch/arm/mach-mxs/mach-apx4devkit.c | 11
-rw-r--r--  arch/arm/mach-omap2/board-flash.c | 5
-rw-r--r--  arch/arm/mach-omap2/board-n8x0.c | 6
-rw-r--r--  arch/arm/mach-omap2/board-omap3beagle.c | 28
-rw-r--r--  arch/arm/mach-omap2/board-overo.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-rx51-peripherals.c | 6
-rw-r--r--  arch/arm/mach-omap2/clock3xxx_data.c | 2
-rw-r--r--  arch/arm/mach-omap2/clock44xx_data.c | 10
-rw-r--r--  arch/arm/mach-omap2/clockdomain.h | 4
-rw-r--r--  arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c | 1
-rw-r--r--  arch/arm/mach-omap2/clockdomains44xx_data.c | 2
-rw-r--r--  arch/arm/mach-omap2/cm.h | 11
-rw-r--r--  arch/arm/mach-omap2/cminst44xx.c | 4
-rw-r--r--  arch/arm/mach-omap2/display.c | 4
-rw-r--r--  arch/arm/mach-omap2/dsp.c | 3
-rw-r--r--  arch/arm/mach-omap2/id.c | 11
-rw-r--r--  arch/arm/mach-omap2/irq.c | 1
-rw-r--r--  arch/arm/mach-omap2/mux.c | 4
-rw-r--r--  arch/arm/mach-omap2/mux.h | 11
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 34
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 36
-rw-r--r--  arch/arm/mach-omap2/omap_l3_smx.c | 3
-rw-r--r--  arch/arm/mach-omap2/omap_phy_internal.c | 6
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c | 1
-rw-r--r--  arch/arm/mach-omap2/prm2xxx_3xxx.c | 14
-rw-r--r--  arch/arm/mach-omap2/serial.c | 67
-rw-r--r--  arch/arm/mach-omap2/twl-common.c | 2
-rw-r--r--  arch/arm/mach-omap2/usb-musb.c | 6
-rw-r--r--  arch/arm/mach-omap2/usb-tusb6010.c | 2
-rw-r--r--  arch/arm/mach-orion5x/include/mach/bridge-regs.h | 2
-rw-r--r--  arch/arm/mach-orion5x/include/mach/io.h | 22
-rw-r--r--  arch/arm/mach-orion5x/include/mach/orion5x.h | 1
-rw-r--r--  arch/arm/mach-pxa/hx4700.c | 15
-rw-r--r--  arch/arm/mach-s3c24xx/clock-s3c2440.c | 2
-rw-r--r--  arch/arm/mach-shmobile/board-armadillo800eva.c | 1
-rw-r--r--  arch/arm/mach-shmobile/board-kzm9d.c | 1
-rw-r--r--  arch/arm/mach-shmobile/board-kzm9g.c | 1
-rw-r--r--  arch/arm/mach-shmobile/board-mackerel.c | 3
-rw-r--r--  arch/arm/mach-shmobile/clock-sh73a0.c | 8
-rw-r--r--  arch/arm/mach-shmobile/intc-r8a7779.c | 7
-rw-r--r--  arch/arm/mach-shmobile/platsmp.c | 10
-rw-r--r--  arch/arm/mach-shmobile/setup-sh7372.c | 2
-rw-r--r--  arch/arm/mach-spear13xx/include/mach/debug-macro.S | 2
-rw-r--r--  arch/arm/mach-spear13xx/include/mach/dma.h | 2
-rw-r--r--  arch/arm/mach-spear13xx/include/mach/generic.h | 2
-rw-r--r--  arch/arm/mach-spear13xx/include/mach/gpio.h | 2
-rw-r--r--  arch/arm/mach-spear13xx/include/mach/irqs.h | 2
-rw-r--r--  arch/arm/mach-spear13xx/include/mach/spear.h | 2
-rw-r--r--  arch/arm/mach-spear13xx/include/mach/timex.h | 2
-rw-r--r--  arch/arm/mach-spear13xx/include/mach/uncompress.h | 2
-rw-r--r--  arch/arm/mach-spear13xx/spear1310.c | 2
-rw-r--r--  arch/arm/mach-spear13xx/spear1340.c | 2
-rw-r--r--  arch/arm/mach-spear13xx/spear13xx.c | 2
-rw-r--r--  arch/arm/mach-spear3xx/include/mach/debug-macro.S | 2
-rw-r--r--  arch/arm/mach-spear3xx/include/mach/generic.h | 2
-rw-r--r--  arch/arm/mach-spear3xx/include/mach/gpio.h | 2
-rw-r--r--  arch/arm/mach-spear3xx/include/mach/irqs.h | 2
-rw-r--r--  arch/arm/mach-spear3xx/include/mach/misc_regs.h | 2
-rw-r--r--  arch/arm/mach-spear3xx/include/mach/spear.h | 2
-rw-r--r--  arch/arm/mach-spear3xx/include/mach/timex.h | 2
-rw-r--r--  arch/arm/mach-spear3xx/include/mach/uncompress.h | 2
-rw-r--r--  arch/arm/mach-spear3xx/spear300.c | 2
-rw-r--r--  arch/arm/mach-spear3xx/spear310.c | 2
-rw-r--r--  arch/arm/mach-spear3xx/spear320.c | 2
-rw-r--r--  arch/arm/mach-spear3xx/spear3xx.c | 4
-rw-r--r--  arch/arm/mach-spear6xx/include/mach/gpio.h | 2
-rw-r--r--  arch/arm/mach-spear6xx/include/mach/misc_regs.h | 2
-rw-r--r--  arch/arm/mach-spear6xx/spear6xx.c | 2
-rw-r--r--  arch/arm/mach-tegra/reset.c | 2
-rw-r--r--  arch/arm/mach-ux500/board-mop500.c | 66
-rw-r--r--  arch/arm/mach-ux500/timer.c | 2
-rw-r--r--  arch/arm/mach-versatile/core.c | 19
-rw-r--r--  arch/arm/mach-versatile/include/mach/hardware.h | 3
-rw-r--r--  arch/arm/mach-versatile/include/mach/io.h | 27
-rw-r--r--  arch/arm/mach-versatile/pci.c | 19
-rw-r--r--  arch/arm/mm/dma-mapping.c | 20
-rw-r--r--  arch/arm/mm/init.c | 2
-rw-r--r--  arch/arm/mm/mm.h | 4
-rw-r--r--  arch/arm/mm/mmu.c | 74
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 5
-rw-r--r--  arch/arm/net/bpf_jit_32.h | 4
-rw-r--r--  arch/arm/plat-mxc/epit.c | 11
-rw-r--r--  arch/arm/plat-mxc/include/mach/common.h | 4
-rw-r--r--  arch/arm/plat-mxc/include/mach/mx2_cam.h | 2
-rw-r--r--  arch/arm/plat-mxc/time.c | 24
-rw-r--r--  arch/arm/plat-omap/clock.c | 2
-rw-r--r--  arch/arm/plat-omap/include/plat/cpu.h | 33
-rw-r--r--  arch/arm/plat-omap/include/plat/mmc.h | 4
-rw-r--r--  arch/arm/plat-orion/common.c | 2
-rw-r--r--  arch/arm/plat-pxa/ssp.c | 1
-rw-r--r--  arch/arm/plat-samsung/adc.c | 8
-rw-r--r--  arch/arm/plat-samsung/devs.c | 3
-rw-r--r--  arch/arm/plat-samsung/include/plat/map-s3c.h | 2
-rw-r--r--  arch/arm/plat-samsung/include/plat/watchdog-reset.h | 2
-rw-r--r--  arch/arm/plat-samsung/s5p-clock.c | 1
-rw-r--r--  arch/arm/plat-spear/include/plat/debug-macro.S | 2
-rw-r--r--  arch/arm/plat-spear/include/plat/pl080.h | 2
-rw-r--r--  arch/arm/plat-spear/include/plat/shirq.h | 2
-rw-r--r--  arch/arm/plat-spear/include/plat/timex.h | 2
-rw-r--r--  arch/arm/plat-spear/include/plat/uncompress.h | 2
-rw-r--r--  arch/arm/plat-spear/pl080.c | 2
-rw-r--r--  arch/arm/plat-spear/restart.c | 2
-rw-r--r--  arch/arm/plat-spear/shirq.c | 2
-rw-r--r--  arch/blackfin/kernel/process.c | 2
-rw-r--r--  arch/h8300/include/asm/pgtable.h | 3
-rw-r--r--  arch/h8300/include/asm/uaccess.h | 3
-rw-r--r--  arch/h8300/kernel/setup.c | 23
-rw-r--r--  arch/h8300/kernel/signal.c | 2
-rw-r--r--  arch/h8300/kernel/time.c | 1
-rw-r--r--  arch/h8300/mm/init.c | 17
-rw-r--r--  arch/m32r/boot/compressed/Makefile | 6
-rw-r--r--  arch/m32r/boot/compressed/misc.c | 12
-rw-r--r--  arch/m32r/include/asm/ptrace.h | 3
-rw-r--r--  arch/m32r/kernel/ptrace.c | 7
-rw-r--r--  arch/m32r/kernel/signal.c | 2
-rw-r--r--  arch/m68k/Kconfig | 2
-rw-r--r--  arch/m68k/include/asm/Kbuild | 2
-rw-r--r--  arch/m68k/include/asm/m528xsim.h | 2
-rw-r--r--  arch/m68k/include/asm/uaccess_mm.h | 11
-rw-r--r--  arch/m68k/kernel/ptrace.c | 2
-rw-r--r--  arch/m68k/kernel/time.c | 4
-rw-r--r--  arch/m68k/lib/uaccess.c | 74
-rw-r--r--  arch/m68k/platform/68328/timers.c | 6
-rw-r--r--  arch/m68k/platform/68360/config.c | 7
-rw-r--r--  arch/m68k/platform/coldfire/clk.c | 7
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/bcm47xx/Kconfig | 1
-rw-r--r--  arch/mips/bcm63xx/dev-pcmcia.c | 4
-rw-r--r--  arch/mips/cavium-octeon/Kconfig | 4
-rw-r--r--  arch/mips/cavium-octeon/smp.c | 2
-rw-r--r--  arch/mips/include/asm/bitops.h | 1
-rw-r--r--  arch/mips/include/asm/cmpxchg.h | 1
-rw-r--r--  arch/mips/include/asm/cpu.h | 7
-rw-r--r--  arch/mips/include/asm/gic.h | 15
-rw-r--r--  arch/mips/include/asm/inst.h | 4
-rw-r--r--  arch/mips/include/asm/io.h | 1
-rw-r--r--  arch/mips/include/asm/irq.h | 1
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h | 2
-rw-r--r--  arch/mips/include/asm/mips-boards/maltaint.h | 10
-rw-r--r--  arch/mips/include/asm/mipsmtregs.h | 2
-rw-r--r--  arch/mips/include/asm/switch_to.h | 6
-rw-r--r--  arch/mips/include/asm/thread_info.h | 4
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 11
-rw-r--r--  arch/mips/kernel/mips_ksyms.c | 8
-rw-r--r--  arch/mips/kernel/octeon_switch.S | 2
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c | 5
-rw-r--r--  arch/mips/kernel/r2300_switch.S | 15
-rw-r--r--  arch/mips/kernel/r4k_switch.S | 12
-rw-r--r--  arch/mips/kernel/smp-bmips.c | 15
-rw-r--r--  arch/mips/kernel/smp.c | 12
-rw-r--r--  arch/mips/kernel/smtc.c | 13
-rw-r--r--  arch/mips/kernel/sync-r4k.c | 5
-rw-r--r--  arch/mips/kernel/traps.c | 7
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 3
-rw-r--r--  arch/mips/mm/Makefile | 4
-rw-r--r--  arch/mips/mm/c-r4k.c | 5
-rw-r--r--  arch/mips/mm/page-funcs.S | 50
-rw-r--r--  arch/mips/mm/page.c | 67
-rw-r--r--  arch/mips/mm/tlbex.c | 2
-rw-r--r--  arch/mips/mti-malta/malta-pci.c | 7
-rw-r--r--  arch/mips/mti-malta/malta-setup.c | 2
-rw-r--r--  arch/mips/netlogic/xlp/setup.c | 8
-rw-r--r--  arch/mips/oprofile/common.c | 1
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c | 4
-rw-r--r--  arch/mips/pci/fixup-fuloong2e.c | 12
-rw-r--r--  arch/mips/pci/fixup-lemote2f.c | 12
-rw-r--r--  arch/mips/pci/fixup-malta.c | 6
-rw-r--r--  arch/mips/pci/fixup-mpc30x.c | 4
-rw-r--r--  arch/mips/pci/fixup-sb1250.c | 6
-rw-r--r--  arch/mips/pci/ops-tx4927.c | 2
-rw-r--r--  arch/mips/pci/pci-ip27.c | 2
-rw-r--r--  arch/mips/pci/pci-lantiq.c | 4
-rw-r--r--  arch/mips/pci/pci-xlr.c | 61
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c | 2
-rw-r--r--  arch/mips/powertv/asic/asic-calliope.c | 2
-rw-r--r--  arch/mips/powertv/asic/asic-cronus.c | 2
-rw-r--r--  arch/mips/powertv/asic/asic-gaia.c | 2
-rw-r--r--  arch/mips/powertv/asic/asic-zeus.c | 2
-rw-r--r--  arch/mips/txx9/generic/pci.c | 2
-rw-r--r--  arch/mn10300/include/asm/ptrace.h | 3
-rw-r--r--  arch/mn10300/include/asm/thread_info.h | 2
-rw-r--r--  arch/mn10300/include/asm/timex.h | 11
-rw-r--r--  arch/mn10300/kernel/cevt-mn10300.c | 10
-rw-r--r--  arch/mn10300/kernel/internal.h | 2
-rw-r--r--  arch/mn10300/kernel/irq.c | 4
-rw-r--r--  arch/mn10300/kernel/signal.c | 5
-rw-r--r--  arch/mn10300/kernel/traps.c | 1
-rw-r--r--  arch/mn10300/mm/dma-alloc.c | 1
-rw-r--r--  arch/mn10300/unit-asb2303/include/unit/timex.h | 4
-rw-r--r--  arch/mn10300/unit-asb2303/smc91111.c | 1
-rw-r--r--  arch/mn10300/unit-asb2305/include/unit/timex.h | 4
-rw-r--r--  arch/mn10300/unit-asb2305/unit-init.c | 1
-rw-r--r--  arch/mn10300/unit-asb2364/include/unit/timex.h | 4
-rw-r--r--  arch/parisc/Makefile | 3
-rw-r--r--  arch/parisc/include/asm/Kbuild | 1
-rw-r--r--  arch/parisc/include/asm/bug.h | 2
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 14
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 97
-rw-r--r--  arch/powerpc/kernel/irq.c | 50
-rw-r--r--  arch/powerpc/kernel/module_32.c | 11
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 4
-rw-r--r--  arch/powerpc/kernel/time.c | 14
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 96
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c | 1
-rw-r--r--  arch/powerpc/mm/numa.c | 4
-rw-r--r--  arch/powerpc/net/bpf_jit_64.S | 2
-rw-r--r--  arch/powerpc/platforms/cell/pervasive.c | 11
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 61
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c | 17
-rw-r--r--  arch/powerpc/xmon/xmon.c | 2
-rw-r--r--  arch/sh/Kconfig | 2
-rw-r--r--  arch/sh/Makefile | 16
-rw-r--r--  arch/sh/boards/mach-kfr2r09/setup.c | 4
-rw-r--r--  arch/sh/drivers/pci/pcie-sh7786.c | 2
-rw-r--r--  arch/sh/include/asm/Kbuild | 34
-rw-r--r--  arch/sh/include/asm/bitsperlong.h | 1
-rw-r--r--  arch/sh/include/asm/cputime.h | 6
-rw-r--r--  arch/sh/include/asm/current.h | 1
-rw-r--r--  arch/sh/include/asm/delay.h | 1
-rw-r--r--  arch/sh/include/asm/div64.h | 1
-rw-r--r--  arch/sh/include/asm/emergency-restart.h | 6
-rw-r--r--  arch/sh/include/asm/errno.h | 6
-rw-r--r--  arch/sh/include/asm/fcntl.h | 1
-rw-r--r--  arch/sh/include/asm/io_noioport.h | 17
-rw-r--r--  arch/sh/include/asm/ioctl.h | 1
-rw-r--r--  arch/sh/include/asm/ipcbuf.h | 1
-rw-r--r--  arch/sh/include/asm/irq_regs.h | 1
-rw-r--r--  arch/sh/include/asm/kvm_para.h | 1
-rw-r--r--  arch/sh/include/asm/local.h | 7
-rw-r--r--  arch/sh/include/asm/local64.h | 1
-rw-r--r--  arch/sh/include/asm/mman.h | 1
-rw-r--r--  arch/sh/include/asm/msgbuf.h | 1
-rw-r--r--  arch/sh/include/asm/param.h | 1
-rw-r--r--  arch/sh/include/asm/parport.h | 1
-rw-r--r--  arch/sh/include/asm/percpu.h | 6
-rw-r--r--  arch/sh/include/asm/poll.h | 1
-rw-r--r--  arch/sh/include/asm/resource.h | 6
-rw-r--r--  arch/sh/include/asm/scatterlist.h | 6
-rw-r--r--  arch/sh/include/asm/sembuf.h | 1
-rw-r--r--  arch/sh/include/asm/serial.h | 1
-rw-r--r--  arch/sh/include/asm/shmbuf.h | 1
-rw-r--r--  arch/sh/include/asm/siginfo.h | 6
-rw-r--r--  arch/sh/include/asm/sizes.h | 1
-rw-r--r--  arch/sh/include/asm/socket.h | 1
-rw-r--r--  arch/sh/include/asm/statfs.h | 6
-rw-r--r--  arch/sh/include/asm/termbits.h | 1
-rw-r--r--  arch/sh/include/asm/termios.h | 1
-rw-r--r--  arch/sh/include/asm/uaccess.h | 75
-rw-r--r--  arch/sh/include/asm/uaccess_32.h | 75
-rw-r--r--  arch/sh/include/asm/uaccess_64.h | 4
-rw-r--r--  arch/sh/include/asm/ucontext.h | 1
-rw-r--r--  arch/sh/include/asm/word-at-a-time.h | 53
-rw-r--r--  arch/sh/include/asm/xor.h | 1
-rw-r--r--  arch/sh/include/cpu-sh2a/cpu/ubc.h | 28
-rw-r--r--  arch/sh/kernel/cpu/sh3/serial-sh7720.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7343.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7366.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7734.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7757.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7785.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7786.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-shx3.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S | 82
-rw-r--r--  arch/sh/kernel/process.c | 1
-rw-r--r--  arch/sh/kernel/process_64.c | 1
-rw-r--r--  arch/sh/kernel/sh_ksyms_64.c | 2
-rw-r--r--  arch/sparc/include/asm/cmt.h | 59
-rw-r--r--  arch/sparc/include/asm/mpmbox.h | 67
-rw-r--r--  arch/sparc/kernel/vio.c | 2
-rw-r--r--  arch/tile/include/asm/thread_info.h | 5
-rw-r--r--  arch/tile/include/asm/uaccess.h | 2
-rw-r--r--  arch/tile/kernel/backtrace.c | 9
-rw-r--r--  arch/tile/kernel/entry.S | 14
-rw-r--r--  arch/tile/kernel/setup.c | 1
-rw-r--r--  arch/um/drivers/mconsole_kern.c | 1
-rw-r--r--  arch/x86/boot/header.S | 42
-rw-r--r--  arch/x86/boot/tools/build.c | 172
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S | 6
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 2
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 2
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 4
-rw-r--r--  arch/x86/include/asm/msr.h | 46
-rw-r--r--  arch/x86/include/asm/nmi.h | 14
-rw-r--r--  arch/x86/include/asm/paravirt.h | 41
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 2
-rw-r--r--  arch/x86/include/asm/perf_event.h | 22
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h | 30
-rw-r--r--  arch/x86/include/asm/uaccess.h | 12
-rw-r--r--  arch/x86/include/asm/uprobes.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 1
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 27
-rw-r--r--  arch/x86/kernel/aperture_64.c | 6
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 4
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 4
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 39
-rw-r--r--  arch/x86/kernel/cpu/common.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mkcapflags.pl | 25
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 122
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 28
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 103
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 267
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 21
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 1850
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.h | 424
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c | 16
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c | 4
-rw-r--r--  arch/x86/kernel/cpu/scattered.c | 2
-rw-r--r--  arch/x86/kernel/kgdb.c | 8
-rw-r--r--  arch/x86/kernel/kvmclock.c | 5
-rw-r--r--  arch/x86/kernel/microcode_core.c | 64
-rw-r--r--  arch/x86/kernel/nmi_selftest.c | 4
-rw-r--r--  arch/x86/kernel/paravirt.c | 2
-rw-r--r--  arch/x86/kernel/pci-dma.c | 3
-rw-r--r--  arch/x86/kernel/process_64.c | 4
-rw-r--r--  arch/x86/kernel/reboot.c | 14
-rw-r--r--  arch/x86/kernel/smpboot.c | 26
-rw-r--r--  arch/x86/kernel/uprobes.c | 3
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 39
-rw-r--r--  arch/x86/kvm/mmu.c | 3
-rw-r--r--  arch/x86/kvm/pmu.c | 22
-rw-r--r--  arch/x86/kvm/trace.h | 12
-rw-r--r--  arch/x86/lib/csum-wrappers_64.c | 2
-rw-r--r--  arch/x86/lib/msr-reg-export.c | 4
-rw-r--r--  arch/x86/lib/msr-reg.S | 10
-rw-r--r--  arch/x86/lib/usercopy.c | 4
-rw-r--r--  arch/x86/lib/x86-opcode-map.txt | 8
-rw-r--r--  arch/x86/mm/init.c | 3
-rw-r--r--  arch/x86/mm/ioremap.c | 4
-rw-r--r--  arch/x86/mm/pageattr.c | 2
-rw-r--r--  arch/x86/mm/srat.c | 2
-rw-r--r--  arch/x86/oprofile/op_model_amd.c | 4
-rw-r--r--  arch/x86/platform/mrst/early_printk_mrst.c | 13
-rw-r--r--  arch/x86/platform/mrst/mrst.c | 2
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 1
-rw-r--r--  arch/x86/tools/gen-insn-attr-x86.awk | 14
-rw-r--r--  arch/x86/um/sys_call_table_32.c | 4
-rw-r--r--  arch/x86/vdso/vdso32-setup.c | 6
-rw-r--r--  arch/x86/xen/enlighten.c | 10
-rw-r--r--  arch/x86/xen/p2m.c | 36
-rw-r--r--  arch/x86/xen/setup.c | 3
-rw-r--r--  arch/xtensa/Makefile | 4
-rw-r--r--  arch/xtensa/kernel/process.c | 2
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S | 3
-rw-r--r--  arch/xtensa/mm/init.c | 18
416 files changed, 4742 insertions, 2048 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b649c5904a4f..a91009c61870 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,7 +7,6 @@ config ARM
select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
- select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
@@ -294,6 +293,7 @@ config ARCH_VERSATILE
select ICST
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select NEED_MACH_IO_H if PCI
select PLAT_VERSATILE
select PLAT_VERSATILE_CLCD
select PLAT_VERSATILE_FPGA_IRQ
@@ -589,6 +589,7 @@ config ARCH_ORION5X
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
+ select NEED_MACH_IO_H
select PLAT_ORION
help
Support for the following Marvell Orion 5x series SoCs:
diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts b/arch/arm/boot/dts/mmp2-brownstone.dts
index 153a4b2d12b5..c9b4f27d191e 100644
--- a/arch/arm/boot/dts/mmp2-brownstone.dts
+++ b/arch/arm/boot/dts/mmp2-brownstone.dts
@@ -11,7 +11,7 @@
/include/ "mmp2.dtsi"
/ {
- model = "Marvell MMP2 Aspenite Development Board";
+ model = "Marvell MMP2 Brownstone Development Board";
compatible = "mrvl,mmp2-brownstone", "mrvl,mmp2";
chosen {
@@ -19,7 +19,7 @@
};
memory {
- reg = <0x00000000 0x04000000>;
+ reg = <0x00000000 0x08000000>;
};
soc {
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index f2ab4ea7cc0e..581cb081cb0f 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -44,6 +44,8 @@
compatible = "ti,omap2-intc";
interrupt-controller;
#interrupt-cells = <1>;
+ ti,intc-size = <96>;
+ reg = <0x480FE000 0x1000>;
};
uart1: serial@4806a000 {
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
index 8314e4171884..dd4358bc26e2 100644
--- a/arch/arm/boot/dts/spear1310-evb.dts
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr1310 Evaluation Baord
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index 9e61da404d57..419ea7413d23 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for all SPEAr1310 SoCs
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
index 0d8472e5ab9f..c9a54e06fb68 100644
--- a/arch/arm/boot/dts/spear1340-evb.dts
+++ b/arch/arm/boot/dts/spear1340-evb.dts
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr1340 Evaluation Baord
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index a26fc47a55e8..d71fe2a68f09 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for all SPEAr1340 SoCs
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 1f8e1e1481df..f7b84aced654 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for all SPEAr13xx SoCs
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -43,8 +43,8 @@
pmu {
compatible = "arm,cortex-a9-pmu";
- interrupts = <0 8 0x04
- 0 9 0x04>;
+ interrupts = <0 6 0x04
+ 0 7 0x04>;
};
L2: l2-cache {
@@ -119,8 +119,8 @@
gmac0: eth@e2000000 {
compatible = "st,spear600-gmac";
reg = <0xe2000000 0x8000>;
- interrupts = <0 23 0x4
- 0 24 0x4>;
+ interrupts = <0 33 0x4
+ 0 34 0x4>;
interrupt-names = "macirq", "eth_wake_irq";
status = "disabled";
};
@@ -202,6 +202,7 @@
kbd@e0300000 {
compatible = "st,spear300-kbd";
reg = <0xe0300000 0x1000>;
+ interrupts = <0 52 0x4>;
status = "disabled";
};
@@ -224,7 +225,7 @@
serial@e0000000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xe0000000 0x1000>;
- interrupts = <0 36 0x4>;
+ interrupts = <0 35 0x4>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts
index fc82b1a26458..d71b8d581e3d 100644
--- a/arch/arm/boot/dts/spear300-evb.dts
+++ b/arch/arm/boot/dts/spear300-evb.dts
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr300 Evaluation Baord
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi
index 01c5e358fdb2..ed3627c116cc 100644
--- a/arch/arm/boot/dts/spear300.dtsi
+++ b/arch/arm/boot/dts/spear300.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr300 SoC
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts
index dc5e2d445a93..b00544e0cd5d 100644
--- a/arch/arm/boot/dts/spear310-evb.dts
+++ b/arch/arm/boot/dts/spear310-evb.dts
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr310 Evaluation Baord
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi
index e47081c494d9..62fc4fb3e5f9 100644
--- a/arch/arm/boot/dts/spear310.dtsi
+++ b/arch/arm/boot/dts/spear310.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr310 SoC
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts
index 6308fa3bec1e..e4e912f95024 100644
--- a/arch/arm/boot/dts/spear320-evb.dts
+++ b/arch/arm/boot/dts/spear320-evb.dts
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr320 Evaluation Baord
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -15,8 +15,8 @@
/include/ "spear320.dtsi"
/ {
- model = "ST SPEAr300 Evaluation Board";
- compatible = "st,spear300-evb", "st,spear300";
+ model = "ST SPEAr320 Evaluation Board";
+ compatible = "st,spear320-evb", "st,spear320";
#address-cells = <1>;
#size-cells = <1>;
@@ -26,7 +26,7 @@
ahb {
pinmux@b3000000 {
- st,pinmux-mode = <3>;
+ st,pinmux-mode = <4>;
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi
index 5372ca399b1f..1f49d69595a0 100644
--- a/arch/arm/boot/dts/spear320.dtsi
+++ b/arch/arm/boot/dts/spear320.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for SPEAr320 SoC
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index 91072553963f..3a8bb5736928 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -1,7 +1,7 @@
/*
* DTS file for all SPEAr3xx SoCs
*
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index 089f0a42c50e..a3c36e47d7ef 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -181,6 +181,7 @@
timer@f0000000 {
compatible = "st,spear-timer";
reg = <0xf0000000 0x400>;
+ interrupt-parent = <&vic0>;
interrupts = <16>;
};
};
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 9d7eb530f95f..aa07f5938f05 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
struct safe_buffer *buf;
unsigned long off;
- dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
- __func__, addr, off, sz, dir);
+ dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+ __func__, addr, sz, dir);
buf = find_safe_buffer_dev(dev, addr, __func__);
if (!buf)
@@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
BUG_ON(buf->direction != dir);
- dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
- __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
buf->safe, buf->safe_dma_addr);
DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
struct safe_buffer *buf;
unsigned long off;
- dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
- __func__, addr, off, sz, dir);
+ dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+ __func__, addr, sz, dir);
buf = find_safe_buffer_dev(dev, addr, __func__);
if (!buf)
@@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
BUG_ON(buf->direction != dir);
- dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
- __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
buf->safe, buf->safe_dma_addr);
DO_STATS(dev->archdata.dmabounce->bounce_count++);
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 9854ff4279e0..11828e632532 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -176,7 +176,6 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_DEVICEFS=y
CONFIG_USB_SUSPEND=y
CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
CONFIG_USB_WDM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_LIBUSUAL=y
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 68374ba6a943..c79f61faa3a5 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -243,7 +243,7 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
-static inline u64 atomic64_read(atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
{
u64 result;
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 3d2220498abc..6ddbe446425e 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -60,13 +60,13 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_CPU_USE_DOMAINS
-#define set_domain(x) \
- do { \
- __asm__ __volatile__( \
- "mcr p15, 0, %0, c3, c0 @ set domain" \
- : : "r" (x)); \
- isb(); \
- } while (0)
+static inline void set_domain(unsigned val)
+{
+ asm volatile(
+ "mcr p15, 0, %0, c3, c0 @ set domain"
+ : : "r" (val));
+ isb();
+}
#define modify_domain(dom,type) \
do { \
@@ -78,8 +78,8 @@
} while (0)
#else
-#define set_domain(x) do { } while (0)
-#define modify_domain(dom,type) do { } while (0)
+static inline void set_domain(unsigned val) { }
+static inline void modify_domain(unsigned dom, unsigned type) { }
#endif
/*
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 7be54690aeec..e42cf597f6e6 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -19,6 +19,7 @@
" .long 1b, 4f, 2b, 4f\n" \
" .popsection\n" \
" .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
"4: mov %0, " err_reg "\n" \
" b 3b\n" \
" .popsection"
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index e0d1c0cfa548..6b9b077d86b3 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -4,7 +4,7 @@
* ARM PrimeXsys System Controller SP810 header file
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index b79f8e97f775..af7b0bda3355 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
-#define TIF_SYSCALL_RESTARTSYS 10
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
@@ -164,11 +163,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
-#define _TIF_SYSCALL_RESTARTSYS (1 << TIF_SYSCALL_RESTARTSYS)
/* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SYSCALL_RESTARTSYS)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
/*
* Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 437f0c426517..0d1851ca6eb9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -495,6 +495,7 @@ ENDPROC(__und_usr)
* The out of line fixup for the ldrt above.
*/
.pushsection .fixup, "ax"
+ .align 2
4: mov pc, r9
.popsection
.pushsection __ex_table,"a"
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index ba32b393b3f0..38c1a3b103a0 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -187,8 +187,8 @@ void kprobe_arm_test_cases(void)
TEST_BF_R ("mov pc, r",0,2f,"")
TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
TEST_BB( "sub pc, pc, #1b-2b+8")
-#if __LINUX_ARM_ARCH__ >= 6
- TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
+#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
+ TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
#endif
TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
index 8f96ec778e8d..6123daf397a7 100644
--- a/arch/arm/kernel/kprobes-thumb.c
+++ b/arch/arm/kernel/kprobes-thumb.c
@@ -660,7 +660,7 @@ static const union decode_item t32_table_1111_100x[] = {
/* LDRSB (literal) 1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
/* LDRH (literal) 1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
/* LDRSH (literal) 1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
- DECODE_EMULATEX (0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
+ DECODE_SIMULATEX(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
REGS(PC, NOSPPCX, 0, 0, 0)),
/* STRB (immediate) 1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 186c8cb982c5..a02eada3aa5d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -503,7 +503,7 @@ __hw_perf_event_init(struct perf_event *event)
event_requires_mode_exclusion(&event->attr)) {
pr_debug("ARM performance counters do not support "
"mode exclusion\n");
- return -EPERM;
+ return -EOPNOTSUPP;
}
/*
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 5700a7ae7f0b..14e38261cd31 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,7 +25,6 @@
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
-#include <linux/unistd.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
@@ -918,8 +917,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
- if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
- scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index fd2392a17ac1..536c5d6b340b 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -27,6 +27,7 @@
*/
#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
+#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
/*
* With EABI, the syscall number has to be loaded into r7.
@@ -47,6 +48,18 @@ const unsigned long sigreturn_codes[7] = {
};
/*
+ * Either we support OABI only, or we have EABI with the OABI
+ * compat layer enabled. In the later case we don't know if
+ * user space is EABI or not, and if not we must not clobber r7.
+ * Always using the OABI syscall solves that issue and works for
+ * all those cases.
+ */
+const unsigned long syscall_restart_code[2] = {
+ SWI_SYS_RESTART, /* swi __NR_restart_syscall */
+ 0xe49df004, /* ldr pc, [sp], #4 */
+};
+
+/*
* atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
@@ -592,10 +605,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
- case -ERESTART_RESTARTBLOCK:
regs->ARM_r0 = regs->ARM_ORIG_r0;
regs->ARM_pc = restart_addr;
break;
+ case -ERESTART_RESTARTBLOCK:
+ regs->ARM_r0 = -EINTR;
+ break;
}
}
@@ -611,14 +626,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
* debugger has chosen to restart at a different PC.
*/
if (regs->ARM_pc == restart_addr) {
- if (retval == -ERESTARTNOHAND ||
- retval == -ERESTART_RESTARTBLOCK
+ if (retval == -ERESTARTNOHAND
|| (retval == -ERESTARTSYS
&& !(ka.sa.sa_flags & SA_RESTART))) {
regs->ARM_r0 = -EINTR;
regs->ARM_pc = continue_addr;
}
- clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
}
handle_signal(signr, &ka, &info, regs);
@@ -632,8 +645,29 @@ static void do_signal(struct pt_regs *regs, int syscall)
* ignore the restart.
*/
if (retval == -ERESTART_RESTARTBLOCK
- && regs->ARM_pc == restart_addr)
- set_thread_flag(TIF_SYSCALL_RESTARTSYS);
+ && regs->ARM_pc == continue_addr) {
+ if (thumb_mode(regs)) {
+ regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
+ regs->ARM_pc -= 2;
+ } else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+ regs->ARM_r7 = __NR_restart_syscall;
+ regs->ARM_pc -= 4;
+#else
+ u32 __user *usp;
+
+ regs->ARM_sp -= 4;
+ usp = (u32 __user *)regs->ARM_sp;
+
+ if (put_user(regs->ARM_pc, usp) == 0) {
+ regs->ARM_pc = KERN_RESTART_CODE;
+ } else {
+ regs->ARM_sp += 4;
+ force_sigsegv(0, current);
+ }
+#endif
+ }
+ }
}
restore_saved_sigmask();
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 5ff067b7c752..6fcfe8398aa4 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -8,5 +8,7 @@
* published by the Free Software Foundation.
*/
#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
+#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
extern const unsigned long sigreturn_codes[7];
+extern const unsigned long syscall_restart_code[2];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 4928d89758f4..3647170e9a16 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -820,6 +820,8 @@ void __init early_trap_init(void *vectors_base)
*/
memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
sigreturn_codes, sizeof(sigreturn_codes));
+ memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+ syscall_restart_code, sizeof(syscall_restart_code));
flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 43a31fb06318..36ff15bbfdd4 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -183,7 +183,9 @@ SECTIONS
}
#endif
+#ifdef CONFIG_SMP
PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
#ifdef CONFIG_XIP_KERNEL
__data_loc = ALIGN(4); /* location in binary */
diff --git a/arch/arm/mach-dove/include/mach/bridge-regs.h b/arch/arm/mach-dove/include/mach/bridge-regs.h
index 226949dc4ac0..f953bb54aa9d 100644
--- a/arch/arm/mach-dove/include/mach/bridge-regs.h
+++ b/arch/arm/mach-dove/include/mach/bridge-regs.h
@@ -50,5 +50,6 @@
#define POWER_MANAGEMENT (BRIDGE_VIRT_BASE | 0x011c)
#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE (BRIDGE_PHYS_BASE | 0x0300)
#endif
diff --git a/arch/arm/mach-dove/include/mach/dove.h b/arch/arm/mach-dove/include/mach/dove.h
index ad1165d488c1..d52b0ef313b7 100644
--- a/arch/arm/mach-dove/include/mach/dove.h
+++ b/arch/arm/mach-dove/include/mach/dove.h
@@ -78,6 +78,7 @@
/* North-South Bridge */
#define BRIDGE_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE (DOVE_SB_REGS_PHYS_BASE | 0x20000)
/* Cryptographic Engine */
#define DOVE_CRYPT_PHYS_BASE (DOVE_SB_REGS_PHYS_BASE | 0x30000)
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 573be57d3d28..6f6d13f91e4c 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -212,7 +212,7 @@ config MACH_SMDKV310
select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_AHCI
select SAMSUNG_DEV_KEYPAD
- select EXYNOS4_DEV_DMA
+ select EXYNOS_DEV_DMA
select SAMSUNG_DEV_PWM
select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_SETUP_FIMD0
@@ -264,7 +264,7 @@ config MACH_UNIVERSAL_C210
select S5P_DEV_ONENAND
select S5P_DEV_TV
select EXYNOS_DEV_SYSMMU
- select EXYNOS4_DEV_DMA
+ select EXYNOS_DEV_DMA
select EXYNOS_DEV_DRM
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_I2C1
@@ -303,7 +303,7 @@ config MACH_NURI
select S5P_DEV_MFC
select S5P_DEV_USB_EHCI
select S5P_SETUP_MIPIPHY
- select EXYNOS4_DEV_DMA
+ select EXYNOS_DEV_DMA
select EXYNOS_DEV_DRM
select EXYNOS4_SETUP_FIMC
select EXYNOS4_SETUP_FIMD0
@@ -341,7 +341,7 @@ config MACH_ORIGEN
select SAMSUNG_DEV_PWM
select EXYNOS_DEV_DRM
select EXYNOS_DEV_SYSMMU
- select EXYNOS4_DEV_DMA
+ select EXYNOS_DEV_DMA
select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_SDHCI
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index e9fafcf163de..373c3c00d24c 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -119,7 +119,9 @@ static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
struct exynos_pm_domain *pd)
{
if (pdev->dev.bus) {
- if (pm_genpd_add_device(&pd->pd, &pdev->dev))
+ if (!pm_genpd_add_device(&pd->pd, &pdev->dev))
+ pm_genpd_dev_need_restore(&pdev->dev, true);
+ else
pr_info("%s: error in adding %s device to %s power"
"domain\n", __func__, dev_name(&pdev->dev),
pd->name);
@@ -151,9 +153,12 @@ static __init int exynos4_pm_init_power_domain(void)
if (of_have_populated_dt())
return exynos_pm_dt_parse_domains();
- for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++)
- pm_genpd_init(&exynos4_pm_domains[idx]->pd, NULL,
- exynos4_pm_domains[idx]->is_off);
+ for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++) {
+ struct exynos_pm_domain *pd = exynos4_pm_domains[idx];
+ int on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
+
+ pm_genpd_init(&pd->pd, NULL, !on);
+ }
#ifdef CONFIG_S5P_DEV_FIMD0
exynos_pm_add_dev_to_genpd(&s5p_device_fimd0, &exynos4_pd_lcd0);
diff --git a/arch/arm/mach-highbank/Makefile b/arch/arm/mach-highbank/Makefile
index f8437dd238c2..ded4652ada80 100644
--- a/arch/arm/mach-highbank/Makefile
+++ b/arch/arm/mach-highbank/Makefile
@@ -1,4 +1,8 @@
-obj-y := clock.o highbank.o system.o
+obj-y := clock.o highbank.o system.o smc.o
+
+plus_sec := $(call as-instr,.arch_extension sec,+sec)
+AFLAGS_smc.o :=-Wa,-march=armv7-a$(plus_sec)
+
obj-$(CONFIG_DEBUG_HIGHBANK_UART) += lluart.o
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h
index d8e2d0be64ac..141ed5171826 100644
--- a/arch/arm/mach-highbank/core.h
+++ b/arch/arm/mach-highbank/core.h
@@ -8,3 +8,4 @@ extern void highbank_lluart_map_io(void);
static inline void highbank_lluart_map_io(void) {}
#endif
+extern void highbank_smc1(int fn, int arg);
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 410a112bb52e..8777612b1a42 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -85,10 +85,24 @@ const static struct of_device_id irq_match[] = {
{}
};
+#ifdef CONFIG_CACHE_L2X0
+static void highbank_l2x0_disable(void)
+{
+ /* Disable PL310 L2 Cache controller */
+ highbank_smc1(0x102, 0x0);
+}
+#endif
+
static void __init highbank_init_irq(void)
{
of_irq_init(irq_match);
+
+#ifdef CONFIG_CACHE_L2X0
+ /* Enable PL310 L2 Cache controller */
+ highbank_smc1(0x102, 0x1);
l2x0_of_init(0, ~0UL);
+ outer_cache.disable = highbank_l2x0_disable;
+#endif
}
static void __init highbank_timer_init(void)
diff --git a/arch/arm/mach-highbank/smc.S b/arch/arm/mach-highbank/smc.S
new file mode 100644
index 000000000000..407d17baaaa9
--- /dev/null
+++ b/arch/arm/mach-highbank/smc.S
@@ -0,0 +1,27 @@
+/*
+ * Copied from omap44xx-smc.S Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software,you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * This is common routine to manage secure monitor API
+ * used to modify the PL310 secure registers.
+ * 'r0' contains the value to be modified and 'r12' contains
+ * the monitor API number.
+ * Function signature : void highbank_smc1(u32 fn, u32 arg)
+ */
+
+ENTRY(highbank_smc1)
+ stmfd sp!, {r4-r11, lr}
+ mov r12, r0
+ mov r0, r1
+ dsb
+ smc #0
+ ldmfd sp!, {r4-r11, pc}
+ENDPROC(highbank_smc1)
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 0021f726b153..eff4db5de0dd 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -477,6 +477,7 @@ config MACH_MX31_3DS
select IMX_HAVE_PLATFORM_IMX2_WDT
select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_KEYPAD
+ select IMX_HAVE_PLATFORM_IMX_SSI
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_IPU_CORE
select IMX_HAVE_PLATFORM_MXC_EHCI
diff --git a/arch/arm/mach-imx/clk-imx1.c b/arch/arm/mach-imx/clk-imx1.c
index 0f0beb580b73..516ddee1948e 100644
--- a/arch/arm/mach-imx/clk-imx1.c
+++ b/arch/arm/mach-imx/clk-imx1.c
@@ -108,8 +108,7 @@ int __init mx1_clocks_init(unsigned long fref)
clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0");
clk_register_clkdev(clk[clko], "clko", NULL);
- mxc_timer_init(NULL, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
- MX1_TIM1_INT);
+ mxc_timer_init(MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), MX1_TIM1_INT);
return 0;
}
diff --git a/arch/arm/mach-imx/clk-imx21.c b/arch/arm/mach-imx/clk-imx21.c
index 4e4f384ee8dd..ea13e61bd5f3 100644
--- a/arch/arm/mach-imx/clk-imx21.c
+++ b/arch/arm/mach-imx/clk-imx21.c
@@ -180,7 +180,7 @@ int __init mx21_clocks_init(unsigned long lref, unsigned long href)
clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL);
clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL);
- mxc_timer_init(NULL, MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
- MX21_INT_GPT1);
+ mxc_timer_init(MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR), MX21_INT_GPT1);
+
return 0;
}
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
index d9833bb5fd61..fdd8cc87c9fe 100644
--- a/arch/arm/mach-imx/clk-imx25.c
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -243,6 +243,6 @@ int __init mx25_clocks_init(void)
clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
clk_register_clkdev(clk[iim_ipg], "iim", NULL);
- mxc_timer_init(NULL, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+ mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
return 0;
}
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
index 50a7ebd8d1b2..295cbd7c08dc 100644
--- a/arch/arm/mach-imx/clk-imx27.c
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -263,8 +263,7 @@ int __init mx27_clocks_init(unsigned long fref)
clk_register_clkdev(clk[ssi1_baud_gate], "bitrate" , "imx-ssi.0");
clk_register_clkdev(clk[ssi2_baud_gate], "bitrate" , "imx-ssi.1");
- mxc_timer_init(NULL, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
- MX27_INT_GPT1);
+ mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
clk_prepare_enable(clk[emi_ahb_gate]);
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
index a854b9cae5ea..c9a06d800f8e 100644
--- a/arch/arm/mach-imx/clk-imx31.c
+++ b/arch/arm/mach-imx/clk-imx31.c
@@ -175,8 +175,7 @@ int __init mx31_clocks_init(unsigned long fref)
mx31_revision();
clk_disable_unprepare(clk[iim_gate]);
- mxc_timer_init(NULL, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
- MX31_INT_GPT);
+ mxc_timer_init(MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), MX31_INT_GPT);
return 0;
}
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index a9e60bf7dd75..c6422fb10bae 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -201,7 +201,6 @@ int __init mx35_clocks_init()
pr_err("i.MX35 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
-
clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
@@ -264,14 +263,20 @@ int __init mx35_clocks_init()
clk_prepare_enable(clk[iim_gate]);
clk_prepare_enable(clk[emi_gate]);
+ /*
+ * SCC is needed to boot via mmc after a watchdog reset. The clock code
+ * before conversion to common clk also enabled UART1 (which isn't
+ * handled here and not needed for mmc) and IIM (which is enabled
+ * unconditionally above).
+ */
+ clk_prepare_enable(clk[scc_gate]);
+
imx_print_silicon_rev("i.MX35", mx35_revision());
#ifdef CONFIG_MXC_USE_EPIT
- epit_timer_init(&epit1_clk,
- MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
+ epit_timer_init(MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
#else
- mxc_timer_init(NULL, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR),
- MX35_INT_GPT);
+ mxc_timer_init(MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
#endif
return 0;
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index fcd94f3b0f0e..a2200c77bf70 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -104,12 +104,12 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
main_bus_sel, ARRAY_SIZE(main_bus_sel));
- clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCDR, 1, 1,
+ clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1,
per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
- clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCDR, 1, 0,
+ clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1,
per_root_sel, ARRAY_SIZE(per_root_sel));
clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
@@ -172,7 +172,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12);
clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16);
- clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", MXC_CCM_CCGR2, 18);
+ clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per_root", MXC_CCM_CCGR2, 18);
clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
@@ -366,8 +366,7 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
clk_set_rate(clk[esdhc_b_podf], 166250000);
/* System timer */
- mxc_timer_init(NULL, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
- MX51_INT_GPT);
+ mxc_timer_init(MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), MX51_INT_GPT);
clk_prepare_enable(clk[iim_gate]);
imx_print_silicon_rev("i.MX51", mx51_revision());
@@ -452,8 +451,7 @@ int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
clk_set_rate(clk[esdhc_b_podf], 200000000);
/* System timer */
- mxc_timer_init(NULL, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
- MX53_INT_GPT);
+ mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), MX53_INT_GPT);
clk_prepare_enable(clk[iim_gate]);
imx_print_silicon_rev("i.MX53", mx53_revision());
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index cab02d0a15d6..e1a17ac7b3b4 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -122,10 +122,6 @@ static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5
"dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
"ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", };
-static const char * const clks_init_on[] __initconst = {
- "mmdc_ch0_axi", "mmdc_ch1_axi", "usboh3",
-};
-
enum mx6q_clks {
dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m,
@@ -156,16 +152,20 @@ enum mx6q_clks {
ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
- ssi2_ipg, ssi3_ipg, clk_max
+ ssi2_ipg, ssi3_ipg, rom,
+ clk_max
};
static struct clk *clk[clk_max];
+static enum mx6q_clks const clks_init_on[] __initconst = {
+ mmdc_ch0_axi, rom,
+};
+
int __init mx6q_clocks_init(void)
{
struct device_node *np;
void __iomem *base;
- struct clk *c;
int i, irq;
clk[dummy] = imx_clk_fixed("dummy", 0);
@@ -365,6 +365,7 @@ int __init mx6q_clocks_init(void)
clk[gpmi_bch] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26);
clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
+ clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
@@ -424,21 +425,14 @@ int __init mx6q_clocks_init(void)
clk_register_clkdev(clk[ahb], "ahb", NULL);
clk_register_clkdev(clk[cko1], "cko1", NULL);
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) {
- c = clk_get_sys(clks_init_on[i], NULL);
- if (IS_ERR(c)) {
- pr_err("%s: failed to get clk %s", __func__,
- clks_init_on[i]);
- return PTR_ERR(c);
- }
- clk_prepare_enable(c);
- }
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+ clk_prepare_enable(clk[clks_init_on[i]]);
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
base = of_iomap(np, 0);
WARN_ON(!base);
irq = irq_of_parse_and_map(np, 0);
- mxc_timer_init(NULL, base, irq);
+ mxc_timer_init(base, irq);
return 0;
}
diff --git a/arch/arm/mach-imx/clk-pllv2.c b/arch/arm/mach-imx/clk-pllv2.c
index 4685919deb63..0440379e3628 100644
--- a/arch/arm/mach-imx/clk-pllv2.c
+++ b/arch/arm/mach-imx/clk-pllv2.c
@@ -74,30 +74,15 @@ struct clk_pllv2 {
void __iomem *base;
};
-static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
+ u32 dp_ctl, u32 dp_op, u32 dp_mfd, u32 dp_mfn)
{
long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
- unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
- void __iomem *pllbase;
+ unsigned long dbl;
s64 temp;
- struct clk_pllv2 *pll = to_clk_pllv2(hw);
-
- pllbase = pll->base;
- dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
- pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
- if (pll_hfsm == 0) {
- dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
- dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
- dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
- } else {
- dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
- dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
- dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
- }
pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
mfi = (mfi <= 5) ? 5 : mfi;
@@ -123,18 +108,30 @@ static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
return temp;
}
-static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
+ u32 dp_op, dp_mfd, dp_mfn, dp_ctl;
+ void __iomem *pllbase;
struct clk_pllv2 *pll = to_clk_pllv2(hw);
+
+ pllbase = pll->base;
+
+ dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+ dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
+ dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
+ dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
+
+ return __clk_pllv2_recalc_rate(parent_rate, dp_ctl, dp_op, dp_mfd, dp_mfn);
+}
+
+static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
+ u32 *dp_op, u32 *dp_mfd, u32 *dp_mfn)
+{
u32 reg;
- void __iomem *pllbase;
long mfi, pdf, mfn, mfd = 999999;
s64 temp64;
unsigned long quad_parent_rate;
- unsigned long pll_hfsm, dp_ctl;
-
- pllbase = pll->base;
quad_parent_rate = 4 * parent_rate;
pdf = mfi = -1;
@@ -144,25 +141,41 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
pdf--;
- temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
- do_div(temp64, quad_parent_rate/1000000);
+ temp64 = rate * (pdf + 1) - quad_parent_rate * mfi;
+ do_div(temp64, quad_parent_rate / 1000000);
mfn = (long)temp64;
+ reg = mfi << 4 | pdf;
+
+ *dp_op = reg;
+ *dp_mfd = mfd;
+ *dp_mfn = mfn;
+
+ return 0;
+}
+
+static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv2 *pll = to_clk_pllv2(hw);
+ void __iomem *pllbase;
+ u32 dp_ctl, dp_op, dp_mfd, dp_mfn;
+ int ret;
+
+ pllbase = pll->base;
+
+
+ ret = __clk_pllv2_set_rate(rate, parent_rate, &dp_op, &dp_mfd, &dp_mfn);
+ if (ret)
+ return ret;
+
dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
/* use dpdck0_2 */
__raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
- pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
- if (pll_hfsm == 0) {
- reg = mfi << 4 | pdf;
- __raw_writel(reg, pllbase + MXC_PLL_DP_OP);
- __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
- __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
- } else {
- reg = mfi << 4 | pdf;
- __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
- __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
- __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
- }
+
+ __raw_writel(dp_op, pllbase + MXC_PLL_DP_OP);
+ __raw_writel(dp_mfd, pllbase + MXC_PLL_DP_MFD);
+ __raw_writel(dp_mfn, pllbase + MXC_PLL_DP_MFN);
return 0;
}
@@ -170,7 +183,11 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
- return rate;
+ u32 dp_op, dp_mfd, dp_mfn;
+
+ __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
+ return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
+ dp_op, dp_mfd, dp_mfn);
}
static int clk_pllv2_prepare(struct clk_hw *hw)
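
The refactor above separates the register I/O from the rate arithmetic so that round_rate can reuse the same computation without touching hardware. As a sketch (not the kernel code), once the dp_op/dp_mfd/dp_mfn fields are decoded, the fractional-PLL output reduces to rate = ref / (pdf + 1) * (mfi + mfn / (mfd + 1)); the derivation of ref from parent_rate and the DPDCK0_2 doubler is omitted here.

	/* illustrative helper: signed fractional part, integer-truncated */
	static unsigned long pllv2_rate_from_fields(unsigned long ref,
			unsigned int pdf, unsigned int mfi, long mfn,
			unsigned int mfd)
	{
		unsigned long base = ref / (pdf + 1);
		long long frac = (long long)base * mfn;

		return base * mfi + (long)(frac / (mfd + 1));
	}
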
diff --git a/arch/arm/mach-imx/crm-regs-imx5.h b/arch/arm/mach-imx/crm-regs-imx5.h
index 5e11ba7daee2..5e3f1f0f4cab 100644
--- a/arch/arm/mach-imx/crm-regs-imx5.h
+++ b/arch/arm/mach-imx/crm-regs-imx5.h
@@ -23,7 +23,7 @@
#define MX53_DPLL1_BASE MX53_IO_ADDRESS(MX53_PLL1_BASE_ADDR)
#define MX53_DPLL2_BASE MX53_IO_ADDRESS(MX53_PLL2_BASE_ADDR)
#define MX53_DPLL3_BASE MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
-#define MX53_DPLL4_BASE MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
+#define MX53_DPLL4_BASE MX53_IO_ADDRESS(MX53_PLL4_BASE_ADDR)
/* PLL Register Offsets */
#define MXC_PLL_DP_CTL 0x00
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 89493abd497c..20ed2d56c1af 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <asm/cacheflush.h>
+#include <asm/cp15.h>
#include <mach/common.h>
int platform_cpu_kill(unsigned int cpu)
@@ -19,6 +20,44 @@ int platform_cpu_kill(unsigned int cpu)
return 1;
}
+static inline void cpu_enter_lowpower(void)
+{
+ unsigned int v;
+
+ flush_cache_all();
+ asm volatile(
+ "mcr p15, 0, %1, c7, c5, 0\n"
+ " mcr p15, 0, %1, c7, c10, 4\n"
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %3\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " orr %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
/*
* platform-specific code to shutdown a CPU
*
@@ -26,9 +65,10 @@ int platform_cpu_kill(unsigned int cpu)
*/
void platform_cpu_die(unsigned int cpu)
{
- flush_cache_all();
+ cpu_enter_lowpower();
imx_enable_cpu(cpu, false);
cpu_do_idle();
+ cpu_leave_lowpower();
/* We should never return from idle */
panic("cpu %d unexpectedly exit from shutdown\n", cpu);
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index c515f8ede1a1..6450303f1a7a 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -70,7 +70,6 @@ static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
I2C_BOARD_INFO("pcf8563", 0x51),
}, {
I2C_BOARD_INFO("tsc2007", 0x48),
- .type = "tsc2007",
.platform_data = &tsc2007_info,
.irq = IMX_GPIO_TO_IRQ(TSC2007_IRQGPIO),
},
diff --git a/arch/arm/mach-imx/mach-cpuimx51sd.c b/arch/arm/mach-imx/mach-cpuimx51sd.c
index ac50f1671e38..1e09de50cbcd 100644
--- a/arch/arm/mach-imx/mach-cpuimx51sd.c
+++ b/arch/arm/mach-imx/mach-cpuimx51sd.c
@@ -142,7 +142,6 @@ static struct i2c_board_info eukrea_cpuimx51sd_i2c_devices[] = {
I2C_BOARD_INFO("pcf8563", 0x51),
}, {
I2C_BOARD_INFO("tsc2007", 0x49),
- .type = "tsc2007",
.platform_data = &tsc2007_info,
},
};
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index dff82eb57cd9..ba09552fe5fe 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -38,7 +38,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
-#include <asm/system.h>
+#include <asm/system_info.h>
#include <mach/common.h>
#include <mach/iomux-mx27.h>
@@ -116,6 +116,8 @@ static const int visstrim_m10_pins[] __initconst = {
PB23_PF_USB_PWR,
PB24_PF_USB_OC,
/* CSI */
+ TVP5150_RSTN | GPIO_GPIO | GPIO_OUT,
+ TVP5150_PWDN | GPIO_GPIO | GPIO_OUT,
PB10_PF_CSI_D0,
PB11_PF_CSI_D1,
PB12_PF_CSI_D2,
@@ -147,6 +149,24 @@ static struct gpio visstrim_m10_version_gpios[] = {
{ MOTHERBOARD_BIT2, GPIOF_IN, "mother-version-2" },
};
+static const struct gpio visstrim_m10_gpios[] __initconst = {
+ {
+ .gpio = TVP5150_RSTN,
+ .flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH,
+ .label = "tvp5150_rstn",
+ },
+ {
+ .gpio = TVP5150_PWDN,
+ .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+ .label = "tvp5150_pwdn",
+ },
+ {
+ .gpio = OTG_PHY_CS_GPIO,
+ .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+ .label = "usbotg_cs",
+ },
+};
+
/* Camera */
static int visstrim_camera_power(struct device *dev, int on)
{
@@ -190,13 +210,6 @@ static void __init visstrim_camera_init(void)
struct platform_device *pdev;
int dma;
- /* Initialize tvp5150 gpios */
- mxc_gpio_mode(TVP5150_RSTN | GPIO_GPIO | GPIO_OUT);
- mxc_gpio_mode(TVP5150_PWDN | GPIO_GPIO | GPIO_OUT);
- gpio_set_value(TVP5150_RSTN, 1);
- gpio_set_value(TVP5150_PWDN, 0);
- ndelay(1);
-
gpio_set_value(TVP5150_PWDN, 1);
ndelay(1);
gpio_set_value(TVP5150_RSTN, 0);
@@ -377,10 +390,6 @@ static struct i2c_board_info visstrim_m10_i2c_devices[] = {
/* USB OTG */
static int otg_phy_init(struct platform_device *pdev)
{
- gpio_set_value(OTG_PHY_CS_GPIO, 0);
-
- mdelay(10);
-
return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
}
@@ -435,6 +444,11 @@ static void __init visstrim_m10_board_init(void)
if (ret)
pr_err("Failed to setup pins (%d)\n", ret);
+ ret = gpio_request_array(visstrim_m10_gpios,
+ ARRAY_SIZE(visstrim_m10_gpios));
+ if (ret)
+ pr_err("Failed to request gpios (%d)\n", ret);
+
imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata);
imx27_add_imx_uart0(&uart_pdata);
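
The visstrim_m10 changes above replace the ad-hoc mxc_gpio_mode()/gpio_set_value() calls with a table of fixed-level GPIOs claimed once via gpio_request_array(). A minimal sketch of that table-based claim, with illustrative pin numbers and labels, assuming <linux/gpio.h> and <linux/printk.h>:

	static const struct gpio board_gpios[] __initconst = {
		{ .gpio = 42, .flags = GPIOF_OUT_INIT_HIGH, .label = "periph_reset_n" },
		{ .gpio = 43, .flags = GPIOF_OUT_INIT_LOW,  .label = "periph_powerdown" },
	};

	static void __init board_claim_gpios(void)
	{
		/* request + set initial direction/level for the whole table */
		int ret = gpio_request_array(board_gpios, ARRAY_SIZE(board_gpios));

		if (ret)
			pr_err("Failed to request gpios (%d)\n", ret);
	}
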
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index d14bbe949a4f..3e7401fca76c 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -32,7 +32,7 @@
* Memory-mapped I/O on MX21ADS base board
*/
#define MX21ADS_MMIO_BASE_ADDR 0xf5000000
-#define MX21ADS_MMIO_SIZE SZ_16M
+#define MX21ADS_MMIO_SIZE 0xc00000
#define MX21ADS_REG_ADDR(offset) (void __force __iomem *) \
(MX21ADS_MMIO_BASE_ADDR + (offset))
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 967ed5b35a45..a8983b9778d1 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -86,6 +86,7 @@ static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size,
void __init imx3_init_l2x0(void)
{
+#ifdef CONFIG_CACHE_L2X0
void __iomem *l2x0_base;
void __iomem *clkctl_base;
@@ -115,6 +116,7 @@ void __init imx3_init_l2x0(void)
}
l2x0_init(l2x0_base, 0x00030024, 0x00000000);
+#endif
}
#ifdef CONFIG_SOC_IMX31
@@ -179,6 +181,8 @@ void __init imx31_soc_init(void)
mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0);
mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0);
+ pinctrl_provide_dummies();
+
if (to_version == 1) {
strncpy(imx31_sdma_pdata.fw_name, "sdma-imx31-to1.bin",
strlen(imx31_sdma_pdata.fw_name));
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index feeee17da96b..1d003053d562 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -202,6 +202,8 @@ void __init imx51_soc_init(void)
mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH);
mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH);
+ pinctrl_provide_dummies();
+
/* i.mx51 has the i.mx35 type sdma */
imx_add_imx_sdma("imx35-sdma", MX51_SDMA_BASE_ADDR, MX51_INT_SDMA, &imx51_sdma_pdata);
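
pinctrl_provide_dummies() in the imx31/imx51 soc_init paths registers a dummy pinctrl state, so drivers already converted to the pinctrl API keep probing on boards that still do pin setup through the legacy iomux tables. The driver-side view is sketched below (illustrative probe, assuming <linux/pinctrl/consumer.h>, <linux/err.h> and <linux/platform_device.h>); with dummies registered the lookup succeeds as a no-op.

	static int example_probe(struct platform_device *pdev)
	{
		struct pinctrl *pinctrl;

		pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
		if (IS_ERR(pinctrl))
			dev_warn(&pdev->dev, "pins are not configured from the driver\n");

		return 0;
	}
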
diff --git a/arch/arm/mach-kirkwood/board-iconnect.c b/arch/arm/mach-kirkwood/board-iconnect.c
index 2222c5739519..b0d3cc49269d 100644
--- a/arch/arm/mach-kirkwood/board-iconnect.c
+++ b/arch/arm/mach-kirkwood/board-iconnect.c
@@ -20,9 +20,6 @@
#include <linux/mv643xx_eth.h>
#include <linux/gpio.h>
#include <linux/leds.h>
-#include <linux/spi/flash.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 25fb3fd418ef..f261cd242643 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -159,6 +159,7 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
gate_fn->gate.flags = clk_gate_flags;
gate_fn->gate.lock = lock;
gate_fn->gate.hw.init = &init;
+ gate_fn->fn = fn;
/* ops is the gate ops, but with our disable function */
if (clk_gate_fn_ops.disable != clk_gate_fn_disable) {
@@ -193,9 +194,11 @@ static struct clk __init *kirkwood_register_gate_fn(const char *name,
bit_idx, 0, &gating_lock, fn);
}
+static struct clk *ge0, *ge1;
+
void __init kirkwood_clk_init(void)
{
- struct clk *runit, *ge0, *ge1, *sata0, *sata1, *usb0, *sdio;
+ struct clk *runit, *sata0, *sata1, *usb0, *sdio;
struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio;
tclk = clk_register_fixed_rate(NULL, "tclk", NULL,
@@ -257,6 +260,9 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
IRQ_KIRKWOOD_GE00_ERR);
+ /* The interface forgets the MAC address assigned by u-boot if
+ the clock is turned off, so claim the clk now. */
+ clk_prepare_enable(ge0);
}
@@ -268,6 +274,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
IRQ_KIRKWOOD_GE01_ERR);
+ clk_prepare_enable(ge1);
}
diff --git a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
index 3eee37a3b501..a115142f8690 100644
--- a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
+++ b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
@@ -38,6 +38,7 @@
#define IRQ_MASK_HIGH_OFF 0x0014
#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE (BRIDGE_PHYS_BASE | 0x0300)
#define L2_CONFIG_REG (BRIDGE_VIRT_BASE | 0x0128)
#define L2_WRITETHROUGH 0x00000010
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index fede3d503efa..c5b68510776b 100644
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -80,6 +80,7 @@
#define UART1_VIRT_BASE (DEV_BUS_VIRT_BASE | 0x2100)
#define BRIDGE_VIRT_BASE (KIRKWOOD_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x20000)
#define CRYPTO_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x30000)
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
deleted file mode 100644
index 0e135a599f3e..000000000000
--- a/arch/arm/mach-mmp/include/mach/gpio-pxa.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_MACH_GPIO_PXA_H
-#define __ASM_MACH_GPIO_PXA_H
-
-#include <mach/addr-map.h>
-#include <mach/cputype.h>
-#include <mach/irqs.h>
-
-#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
-
-#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x) (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
-
-#define gpio_to_bank(gpio) ((gpio) >> 5)
-
-/* NOTE: these macros are defined here to make optimization of
- * gpio_{get,set}_value() to work when 'gpio' is a constant.
- * Usage of these macros otherwise is no longer recommended,
- * use generic GPIO API whenever possible.
- */
-#define GPIO_bit(gpio) (1 << ((gpio) & 0x1f))
-
-#define GPLR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x00)
-#define GPDR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x0c)
-#define GPSR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x18)
-#define GPCR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x24)
-
-#include <plat/gpio-pxa.h>
-
-#endif /* __ASM_MACH_GPIO_PXA_H */
diff --git a/arch/arm/mach-mmp/irq.c b/arch/arm/mach-mmp/irq.c
index fcfe0e3bd701..e60c7d98922b 100644
--- a/arch/arm/mach-mmp/irq.c
+++ b/arch/arm/mach-mmp/irq.c
@@ -241,6 +241,7 @@ void __init mmp2_init_icu(void)
icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE;
icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE;
icu_data[1].nr_irqs = 2;
+ icu_data[1].cascade_irq = 4;
icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE;
icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
icu_data[1].virq_base, 0,
@@ -249,6 +250,7 @@ void __init mmp2_init_icu(void)
icu_data[2].reg_status = mmp_icu_base + 0x154;
icu_data[2].reg_mask = mmp_icu_base + 0x16c;
icu_data[2].nr_irqs = 2;
+ icu_data[2].cascade_irq = 5;
icu_data[2].virq_base = IRQ_MMP2_RTC_BASE;
icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
icu_data[2].virq_base, 0,
@@ -257,6 +259,7 @@ void __init mmp2_init_icu(void)
icu_data[3].reg_status = mmp_icu_base + 0x180;
icu_data[3].reg_mask = mmp_icu_base + 0x17c;
icu_data[3].nr_irqs = 3;
+ icu_data[3].cascade_irq = 9;
icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE;
icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
icu_data[3].virq_base, 0,
@@ -265,6 +268,7 @@ void __init mmp2_init_icu(void)
icu_data[4].reg_status = mmp_icu_base + 0x158;
icu_data[4].reg_mask = mmp_icu_base + 0x170;
icu_data[4].nr_irqs = 5;
+ icu_data[4].cascade_irq = 17;
icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE;
icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
icu_data[4].virq_base, 0,
@@ -273,6 +277,7 @@ void __init mmp2_init_icu(void)
icu_data[5].reg_status = mmp_icu_base + 0x15c;
icu_data[5].reg_mask = mmp_icu_base + 0x174;
icu_data[5].nr_irqs = 15;
+ icu_data[5].cascade_irq = 35;
icu_data[5].virq_base = IRQ_MMP2_MISC_BASE;
icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
icu_data[5].virq_base, 0,
@@ -281,6 +286,7 @@ void __init mmp2_init_icu(void)
icu_data[6].reg_status = mmp_icu_base + 0x160;
icu_data[6].reg_mask = mmp_icu_base + 0x178;
icu_data[6].nr_irqs = 2;
+ icu_data[6].cascade_irq = 51;
icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE;
icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
icu_data[6].virq_base, 0,
@@ -289,6 +295,7 @@ void __init mmp2_init_icu(void)
icu_data[7].reg_status = mmp_icu_base + 0x188;
icu_data[7].reg_mask = mmp_icu_base + 0x184;
icu_data[7].nr_irqs = 2;
+ icu_data[7].cascade_irq = 55;
icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE;
icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
icu_data[7].virq_base, 0,
diff --git a/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h b/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
index c64dbb96dbad..eb187e0e059b 100644
--- a/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
+++ b/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
@@ -31,5 +31,6 @@
#define IRQ_MASK_HIGH_OFF 0x0014
#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE (BRIDGE_PHYS_BASE | 0x0300)
#endif
diff --git a/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h b/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
index 3674497162e3..e807c4c52a0b 100644
--- a/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
+++ b/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
@@ -42,6 +42,7 @@
#define MV78XX0_CORE0_REGS_PHYS_BASE 0xf1020000
#define MV78XX0_CORE1_REGS_PHYS_BASE 0xf1024000
#define MV78XX0_CORE_REGS_VIRT_BASE 0xfe400000
+#define MV78XX0_CORE_REGS_PHYS_BASE 0xfe400000
#define MV78XX0_CORE_REGS_SIZE SZ_16K
#define MV78XX0_PCIE_IO_PHYS_BASE(i) (0xf0800000 + ((i) << 20))
@@ -59,6 +60,7 @@
* Core-specific peripheral registers.
*/
#define BRIDGE_VIRT_BASE (MV78XX0_CORE_REGS_VIRT_BASE)
+#define BRIDGE_PHYS_BASE (MV78XX0_CORE_REGS_PHYS_BASE)
/*
* Register Map
diff --git a/arch/arm/mach-mxs/mach-apx4devkit.c b/arch/arm/mach-mxs/mach-apx4devkit.c
index 5e90b9dcdef8..f5f061757deb 100644
--- a/arch/arm/mach-mxs/mach-apx4devkit.c
+++ b/arch/arm/mach-mxs/mach-apx4devkit.c
@@ -205,6 +205,16 @@ static int apx4devkit_phy_fixup(struct phy_device *phy)
return 0;
}
+static void __init apx4devkit_fec_phy_clk_enable(void)
+{
+ struct clk *clk;
+
+ /* Enable fec phy clock */
+ clk = clk_get_sys("enet_out", NULL);
+ if (!IS_ERR(clk))
+ clk_prepare_enable(clk);
+}
+
static void __init apx4devkit_init(void)
{
mx28_soc_init();
@@ -225,6 +235,7 @@ static void __init apx4devkit_init(void)
phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
apx4devkit_phy_fixup);
+ apx4devkit_fec_phy_clk_enable();
mx28_add_fec(0, &mx28_fec_pdata);
mx28_add_mxs_mmc(0, &apx4devkit_mmc_pdata);
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index 70a81f900bb5..53c39d239d6e 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -97,11 +97,6 @@ __init board_onenand_init(struct mtd_partition *onenand_parts,
gpmc_onenand_init(&board_onenand_data);
}
-#else
-void
-__init board_onenand_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
-{
-}
#endif /* CONFIG_MTD_ONENAND_OMAP2 || CONFIG_MTD_ONENAND_OMAP2_MODULE */
#if defined(CONFIG_MTD_NAND_OMAP2) || \
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 8ca14e88a31a..2c5d0ed75285 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -83,11 +83,9 @@ static struct musb_hdrc_config musb_config = {
};
static struct musb_hdrc_platform_data tusb_data = {
-#if defined(CONFIG_USB_MUSB_OTG)
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
.mode = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_PERIPHERAL)
- .mode = MUSB_PERIPHERAL,
-#else /* defined(CONFIG_USB_MUSB_HOST) */
+#else
.mode = MUSB_HOST,
#endif
.set_power = tusb_set_power,
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 79c6909eeb78..580fd17208da 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -81,13 +81,13 @@ static u8 omap3_beagle_version;
static struct {
int mmc1_gpio_wp;
int usb_pwr_level;
- int reset_gpio;
+ int dvi_pd_gpio;
int usr_button_gpio;
int mmc_caps;
} beagle_config = {
.mmc1_gpio_wp = -EINVAL,
.usb_pwr_level = GPIOF_OUT_INIT_LOW,
- .reset_gpio = 129,
+ .dvi_pd_gpio = -EINVAL,
.usr_button_gpio = 4,
.mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
};
@@ -126,21 +126,21 @@ static void __init omap3_beagle_init_rev(void)
printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX;
beagle_config.mmc1_gpio_wp = 29;
- beagle_config.reset_gpio = 170;
+ beagle_config.dvi_pd_gpio = 170;
beagle_config.usr_button_gpio = 7;
break;
case 6:
printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n");
omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3;
beagle_config.mmc1_gpio_wp = 23;
- beagle_config.reset_gpio = 170;
+ beagle_config.dvi_pd_gpio = 170;
beagle_config.usr_button_gpio = 7;
break;
case 5:
printk(KERN_INFO "OMAP3 Beagle Rev: C4\n");
omap3_beagle_version = OMAP3BEAGLE_BOARD_C4;
beagle_config.mmc1_gpio_wp = 23;
- beagle_config.reset_gpio = 170;
+ beagle_config.dvi_pd_gpio = 170;
beagle_config.usr_button_gpio = 7;
break;
case 0:
@@ -274,11 +274,9 @@ static int beagle_twl_gpio_setup(struct device *dev,
if (r)
pr_err("%s: unable to configure nDVI_PWR_EN\n",
__func__);
- r = gpio_request_one(gpio + 2, GPIOF_OUT_INIT_HIGH,
- "DVI_LDO_EN");
- if (r)
- pr_err("%s: unable to configure DVI_LDO_EN\n",
- __func__);
+
+ beagle_config.dvi_pd_gpio = gpio + 2;
+
} else {
/*
* REVISIT: need ehci-omap hooks for external VBUS
@@ -287,7 +285,7 @@ static int beagle_twl_gpio_setup(struct device *dev,
if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"))
pr_err("%s: unable to configure EHCI_nOC\n", __func__);
}
- dvi_panel.power_down_gpio = beagle_config.reset_gpio;
+ dvi_panel.power_down_gpio = beagle_config.dvi_pd_gpio;
gpio_request_one(gpio + TWL4030_GPIO_MAX, beagle_config.usb_pwr_level,
"nEN_USB_PWR");
@@ -499,7 +497,7 @@ static void __init omap3_beagle_init(void)
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
omap3_beagle_init_rev();
- if (beagle_config.mmc1_gpio_wp != -EINVAL)
+ if (gpio_is_valid(beagle_config.mmc1_gpio_wp))
omap_mux_init_gpio(beagle_config.mmc1_gpio_wp, OMAP_PIN_INPUT);
mmc[0].caps = beagle_config.mmc_caps;
omap_hsmmc_init(mmc);
@@ -510,15 +508,13 @@ static void __init omap3_beagle_init(void)
platform_add_devices(omap3_beagle_devices,
ARRAY_SIZE(omap3_beagle_devices));
+ if (gpio_is_valid(beagle_config.dvi_pd_gpio))
+ omap_mux_init_gpio(beagle_config.dvi_pd_gpio, OMAP_PIN_OUTPUT);
omap_display_init(&beagle_dss_data);
omap_serial_init();
omap_sdrc_init(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
- omap_mux_init_gpio(170, OMAP_PIN_INPUT);
- /* REVISIT leave DVI powered down until it's needed ... */
- gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "DVI_nPD");
-
usb_musb_init(NULL);
usbhs_init(&usbhs_bdata);
omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 8fa2fc3a4c3c..779734d8ba37 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -494,8 +494,8 @@ static void __init overo_init(void)
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
- omap_hsmmc_init(mmc);
overo_i2c_init();
+ omap_hsmmc_init(mmc);
omap_display_init(&overo_dss_data);
omap_serial_init();
omap_sdrc_init(mt46h32m32lf6_sdrc_params,
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index ff53deccecab..df2534de3361 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -144,7 +144,6 @@ static struct lis3lv02d_platform_data rx51_lis3lv02d_data = {
.release_resources = lis302_release,
.st_min_limits = {-32, 3, 3},
.st_max_limits = {-3, 32, 32},
- .irq2 = OMAP_GPIO_IRQ(LIS302_IRQ2_GPIO),
};
#endif
@@ -1030,7 +1029,6 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = {
{
I2C_BOARD_INFO("lis3lv02d", 0x1d),
.platform_data = &rx51_lis3lv02d_data,
- .irq = OMAP_GPIO_IRQ(LIS302_IRQ1_GPIO),
},
#endif
};
@@ -1056,6 +1054,10 @@ static int __init rx51_i2c_init(void)
omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata);
omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
+#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
+ rx51_lis3lv02d_data.irq2 = gpio_to_irq(LIS302_IRQ2_GPIO);
+ rx51_peripherals_i2c_board_info_3[0].irq = gpio_to_irq(LIS302_IRQ1_GPIO);
+#endif
omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3,
ARRAY_SIZE(rx51_peripherals_i2c_board_info_3));
return 0;
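
The rx51 change above drops the compile-time OMAP_GPIO_IRQ() macro and resolves the lis3lv02d interrupt lines with gpio_to_irq() at init time, once gpiolib can map GPIO numbers to IRQs. A sketch of that runtime lookup, with an illustrative SENSOR_IRQ_GPIO and assuming <linux/gpio.h> and <linux/i2c.h>:

	#define SENSOR_IRQ_GPIO	181	/* illustrative */

	static struct i2c_board_info sensor_info __initdata = {
		I2C_BOARD_INFO("lis3lv02d", 0x1d),
	};

	static int __init board_i2c_init(void)
	{
		/* resolve the IRQ only when gpiolib can map GPIO -> IRQ */
		sensor_info.irq = gpio_to_irq(SENSOR_IRQ_GPIO);
		return i2c_register_board_info(3, &sensor_info, 1);
	}
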
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 4e1a3b0e8cc8..1efdec236ae8 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3514,7 +3514,7 @@ int __init omap3xxx_clk_init(void)
struct omap_clk *c;
u32 cpu_clkflg = 0;
- if (cpu_is_omap3517()) {
+ if (soc_is_am35xx()) {
cpu_mask = RATE_IN_34XX;
cpu_clkflg = CK_AM35XX;
} else if (cpu_is_omap3630()) {
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 2172f6603848..ba6f9a0a43e9 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -84,6 +84,7 @@ static struct clk slimbus_clk = {
static struct clk sys_32k_ck = {
.name = "sys_32k_ck",
+ .clkdm_name = "prm_clkdm",
.rate = 32768,
.ops = &clkops_null,
};
@@ -512,6 +513,7 @@ static struct clk ddrphy_ck = {
.name = "ddrphy_ck",
.parent = &dpll_core_m2_ck,
.ops = &clkops_null,
+ .clkdm_name = "l3_emif_clkdm",
.fixed_div = 2,
.recalc = &omap_fixed_divisor_recalc,
};
@@ -769,6 +771,7 @@ static const struct clksel dpll_mpu_m2_div[] = {
static struct clk dpll_mpu_m2_ck = {
.name = "dpll_mpu_m2_ck",
.parent = &dpll_mpu_ck,
+ .clkdm_name = "cm_clkdm",
.clksel = dpll_mpu_m2_div,
.clksel_reg = OMAP4430_CM_DIV_M2_DPLL_MPU,
.clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
@@ -1149,6 +1152,7 @@ static const struct clksel l3_div_div[] = {
static struct clk l3_div_ck = {
.name = "l3_div_ck",
.parent = &div_core_ck,
+ .clkdm_name = "cm_clkdm",
.clksel = l3_div_div,
.clksel_reg = OMAP4430_CM_CLKSEL_CORE,
.clksel_mask = OMAP4430_CLKSEL_L3_MASK,
@@ -2824,6 +2828,7 @@ static const struct clksel trace_clk_div_div[] = {
static struct clk trace_clk_div_ck = {
.name = "trace_clk_div_ck",
.parent = &pmd_trace_clk_mux_ck,
+ .clkdm_name = "emu_sys_clkdm",
.clksel = trace_clk_div_div,
.clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
@@ -3412,9 +3417,12 @@ int __init omap4xxx_clk_init(void)
if (cpu_is_omap443x()) {
cpu_mask = RATE_IN_4430;
cpu_clkflg = CK_443X;
- } else if (cpu_is_omap446x()) {
+ } else if (cpu_is_omap446x() || cpu_is_omap447x()) {
cpu_mask = RATE_IN_4460 | RATE_IN_4430;
cpu_clkflg = CK_446X | CK_443X;
+
+ if (cpu_is_omap447x())
+ pr_warn("WARNING: OMAP4470 clock data incomplete!\n");
} else {
return 0;
}
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
index f7b58609bad8..6227e9505c2d 100644
--- a/arch/arm/mach-omap2/clockdomain.h
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -31,12 +31,16 @@
*
* CLKDM_NO_AUTODEPS: Prevent "autodeps" from being added/removed from this
* clockdomain. (Currently, this applies to OMAP3 clockdomains only.)
+ * CLKDM_ACTIVE_WITH_MPU: The PRCM guarantees that this clockdomain is
+ * active whenever the MPU is active. True for interconnects and
+ * the WKUP clockdomains.
*/
#define CLKDM_CAN_FORCE_SLEEP (1 << 0)
#define CLKDM_CAN_FORCE_WAKEUP (1 << 1)
#define CLKDM_CAN_ENABLE_AUTO (1 << 2)
#define CLKDM_CAN_DISABLE_AUTO (1 << 3)
#define CLKDM_NO_AUTODEPS (1 << 4)
+#define CLKDM_ACTIVE_WITH_MPU (1 << 5)
#define CLKDM_CAN_HWSUP (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
#define CLKDM_CAN_SWSUP (CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
diff --git a/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c b/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
index 839145e1cfbe..4972219653ce 100644
--- a/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
+++ b/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
@@ -88,4 +88,5 @@ struct clockdomain wkup_common_clkdm = {
.name = "wkup_clkdm",
.pwrdm = { .name = "wkup_pwrdm" },
.dep_bit = OMAP_EN_WKUP_SHIFT,
+ .flags = CLKDM_ACTIVE_WITH_MPU,
};
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c
index c53425847493..7f2133abe7d3 100644
--- a/arch/arm/mach-omap2/clockdomains44xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -381,7 +381,7 @@ static struct clockdomain l4_wkup_44xx_clkdm = {
.cm_inst = OMAP4430_PRM_WKUP_CM_INST,
.clkdm_offs = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
.dep_bit = OMAP4430_L4WKUP_STATDEP_SHIFT,
- .flags = CLKDM_CAN_HWSUP,
+ .flags = CLKDM_CAN_HWSUP | CLKDM_ACTIVE_WITH_MPU,
};
static struct clockdomain emu_sys_44xx_clkdm = {
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index a7bc096bd407..f24e3f7a2bbc 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -22,4 +22,15 @@
*/
#define MAX_MODULE_READY_TIME 2000
+/*
+ * MAX_MODULE_DISABLE_TIME: max duration in microseconds to wait for
+ * the PRCM to request that a module enter the inactive state in the
+ * case of OMAP2 & 3. In the case of OMAP4 this is the max duration
+ * in microseconds for the module to reach the inactive state from
+ * a functional state.
+ * XXX FSUSB on OMAP4430 takes ~4ms to idle after reset during
+ * kernel init.
+ */
+#define MAX_MODULE_DISABLE_TIME 5000
+
#endif
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
index 8c86d294b1a3..1a39945d9ff8 100644
--- a/arch/arm/mach-omap2/cminst44xx.c
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -313,9 +313,9 @@ int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_off
omap_test_timeout((_clkctrl_idlest(part, inst, cdoffs, clkctrl_offs) ==
CLKCTRL_IDLEST_DISABLED),
- MAX_MODULE_READY_TIME, i);
+ MAX_MODULE_DISABLE_TIME, i);
- return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
+ return (i < MAX_MODULE_DISABLE_TIME) ? 0 : -EBUSY;
}
/**
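
wait_module_idle now polls with the larger MAX_MODULE_DISABLE_TIME because some modules (FSUSB on OMAP4430) need roughly 4 ms to reach the disabled IDLEST state. A generic sketch of the microsecond-granularity polling pattern, with illustrative names, assuming <linux/io.h>, <linux/delay.h> and <linux/errno.h>:

	static int wait_idlest_disabled(void __iomem *clkctrl, u32 mask,
					u32 disabled, unsigned int timeout_us)
	{
		unsigned int i = 0;

		while ((readl_relaxed(clkctrl) & mask) != disabled) {
			if (++i > timeout_us)
				return -EBUSY;
			udelay(1);
		}

		return 0;
	}
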
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 54d49ddb9b81..5fb47a14f4ba 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -271,9 +271,9 @@ static struct platform_device *create_simple_dss_pdev(const char *pdev_name,
goto err;
}
- r = omap_device_register(pdev);
+ r = platform_device_add(pdev);
if (r) {
- pr_err("Could not register omap_device for %s\n", pdev_name);
+ pr_err("Could not register platform_device for %s\n", pdev_name);
goto err;
}
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index 845309f146fe..88ffa1e645cd 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -20,6 +20,9 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+
+#include <asm/memblock.h>
+
#include "cm2xxx_3xxx.h"
#include "prm2xxx_3xxx.h"
#ifdef CONFIG_BRIDGE_DVFS
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 0389b3264abe..00486a8564fd 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -247,6 +247,17 @@ void __init omap3xxx_check_features(void)
omap_features |= OMAP3_HAS_SDRC;
/*
+ * am35x fixups:
+ * - The am35x Chip ID register has bits 12, 7:5, and 3:2 marked as
+ * reserved and therefore return 0 when read. Unfortunately,
+ * OMAP3_CHECK_FEATURE() will interpret some of those zeroes to
+ * mean that a feature is present even though it isn't, so clear
+ * the incorrectly set feature bits.
+ */
+ if (soc_is_am35xx())
+ omap_features &= ~(OMAP3_HAS_IVA | OMAP3_HAS_ISP);
+
+ /*
* TODO: Get additional info (where applicable)
* e.g. Size of L2 cache.
*/
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index fdc4303be563..6038a8c84b74 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -149,6 +149,7 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
ct->chip.irq_ack = omap_mask_ack_irq;
ct->chip.irq_mask = irq_gc_mask_disable_reg;
ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
ct->regs.enable = INTC_MIR_CLEAR0;
ct->regs.disable = INTC_MIR_SET0;
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 80e55c5c9998..9fe6829f4c16 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -41,6 +41,7 @@
#include "control.h"
#include "mux.h"
#include "prm.h"
+#include "common.h"
#define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */
#define OMAP_MUX_BASE_SZ 0x5ca
@@ -217,8 +218,7 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
return -ENODEV;
}
-static int __init
-omap_mux_get_by_name(const char *muxname,
+int __init omap_mux_get_by_name(const char *muxname,
struct omap_mux_partition **found_partition,
struct omap_mux **found_mux)
{
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 69fe060a0b75..471e62a74a16 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -59,6 +59,7 @@
#define OMAP_PIN_OFF_WAKEUPENABLE OMAP_WAKEUP_EN
#define OMAP_MODE_GPIO(x) (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4)
+#define OMAP_MODE_UART(x) (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE0)
/* Flags for omapX_mux_init */
#define OMAP_PACKAGE_MASK 0xffff
@@ -225,8 +226,18 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads);
*/
void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state);
+int omap_mux_get_by_name(const char *muxname,
+ struct omap_mux_partition **found_partition,
+ struct omap_mux **found_mux);
#else
+static inline int omap_mux_get_by_name(const char *muxname,
+ struct omap_mux_partition **found_partition,
+ struct omap_mux **found_mux)
+{
+ return 0;
+}
+
static inline int omap_mux_init_gpio(int gpio, int val)
{
return 0;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index bf86f7e8f91f..2d710f50fca2 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -530,7 +530,7 @@ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
_set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v);
if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
- _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
+ _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART, v);
/* XXX test pwrdm_get_wken for this hwmod's subsystem */
@@ -1124,15 +1124,18 @@ static struct omap_hwmod_addr_space * __init _find_mpu_rt_addr_space(struct omap
* _enable_sysc - try to bring a module out of idle via OCP_SYSCONFIG
* @oh: struct omap_hwmod *
*
- * If module is marked as SWSUP_SIDLE, force the module out of slave
- * idle; otherwise, configure it for smart-idle. If module is marked
- * as SWSUP_MSUSPEND, force the module out of master standby;
- * otherwise, configure it for smart-standby. No return value.
+ * Ensure that the OCP_SYSCONFIG register for the IP block represented
+ * by @oh is set to indicate to the PRCM that the IP block is active.
+ * Usually this means placing the module into smart-idle mode and
+ * smart-standby, but if there is a bug in the automatic idle handling
+ * for the IP block, it may need to be placed into the force-idle or
+ * no-idle variants of these modes. No return value.
*/
static void _enable_sysc(struct omap_hwmod *oh)
{
u8 idlemode, sf;
u32 v;
+ bool clkdm_act;
if (!oh->class->sysc)
return;
@@ -1141,8 +1144,16 @@ static void _enable_sysc(struct omap_hwmod *oh)
sf = oh->class->sysc->sysc_flags;
if (sf & SYSC_HAS_SIDLEMODE) {
- idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
- HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+ clkdm_act = ((oh->clkdm &&
+ oh->clkdm->flags & CLKDM_ACTIVE_WITH_MPU) ||
+ (oh->_clk && oh->_clk->clkdm &&
+ oh->_clk->clkdm->flags & CLKDM_ACTIVE_WITH_MPU));
+ if (clkdm_act && !(oh->class->sysc->idlemodes &
+ (SIDLE_SMART | SIDLE_SMART_WKUP)))
+ idlemode = HWMOD_IDLEMODE_FORCE;
+ else
+ idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
+ HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
_set_slave_idlemode(oh, idlemode, &v);
}
@@ -1208,8 +1219,13 @@ static void _idle_sysc(struct omap_hwmod *oh)
sf = oh->class->sysc->sysc_flags;
if (sf & SYSC_HAS_SIDLEMODE) {
- idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
- HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
+ /* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */
+ if (oh->flags & HWMOD_SWSUP_SIDLE ||
+ !(oh->class->sysc->idlemodes &
+ (SIDLE_SMART | SIDLE_SMART_WKUP)))
+ idlemode = HWMOD_IDLEMODE_FORCE;
+ else
+ idlemode = HWMOD_IDLEMODE_SMART;
_set_slave_idlemode(oh, idlemode, &v);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 950454a3fa31..b7bcba5221ba 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -393,8 +393,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_counter_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0004,
.sysc_flags = SYSC_HAS_SIDLEMODE,
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO),
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -854,6 +853,11 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
.name = "dss_hdmi",
.class = &omap44xx_hdmi_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
+ /*
+ * HDMI audio requires the use of no-idle mode, so the
+ * idle mode is set by software.
+ */
+ .flags = HWMOD_SWSUP_SIDLE,
.mpu_irqs = omap44xx_dss_hdmi_irqs,
.sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
.main_clk = "dss_48mhz_clk",
@@ -1924,7 +1928,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp1_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
@@ -1959,7 +1963,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp2_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
@@ -1994,7 +1998,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp3_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
@@ -2029,7 +2033,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp4_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
@@ -3860,7 +3864,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
};
/* usb_host_fs -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_usb_host_fs__l3_main_2 = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_usb_host_fs__l3_main_2 = {
.master = &omap44xx_usb_host_fs_hwmod,
.slave = &omap44xx_l3_main_2_hwmod,
.clk = "l3_div_ck",
@@ -3918,7 +3922,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
};
/* aess -> l4_abe */
-static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_aess__l4_abe = {
.master = &omap44xx_aess_hwmod,
.slave = &omap44xx_l4_abe_hwmod,
.clk = "ocp_abe_iclk",
@@ -4009,7 +4013,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
};
/* l4_abe -> aess */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
.master = &omap44xx_l4_abe_hwmod,
.slave = &omap44xx_aess_hwmod,
.clk = "ocp_abe_iclk",
@@ -4027,7 +4031,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
};
/* l4_abe -> aess (dma) */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess_dma = {
.master = &omap44xx_l4_abe_hwmod,
.slave = &omap44xx_aess_hwmod,
.clk = "ocp_abe_iclk",
@@ -5853,7 +5857,7 @@ static struct omap_hwmod_addr_space omap44xx_usb_host_fs_addrs[] = {
};
/* l4_cfg -> usb_host_fs */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_fs = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
.master = &omap44xx_l4_cfg_hwmod,
.slave = &omap44xx_usb_host_fs_hwmod,
.clk = "l4_div_ck",
@@ -6010,13 +6014,13 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_iva__l3_main_2,
&omap44xx_l3_main_1__l3_main_2,
&omap44xx_l4_cfg__l3_main_2,
- &omap44xx_usb_host_fs__l3_main_2,
+ /* &omap44xx_usb_host_fs__l3_main_2, */
&omap44xx_usb_host_hs__l3_main_2,
&omap44xx_usb_otg_hs__l3_main_2,
&omap44xx_l3_main_1__l3_main_3,
&omap44xx_l3_main_2__l3_main_3,
&omap44xx_l4_cfg__l3_main_3,
- &omap44xx_aess__l4_abe,
+ /* &omap44xx_aess__l4_abe, */
&omap44xx_dsp__l4_abe,
&omap44xx_l3_main_1__l4_abe,
&omap44xx_mpu__l4_abe,
@@ -6025,8 +6029,8 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_cfg__l4_wkup,
&omap44xx_mpu__mpu_private,
&omap44xx_l4_cfg__ocp_wp_noc,
- &omap44xx_l4_abe__aess,
- &omap44xx_l4_abe__aess_dma,
+ /* &omap44xx_l4_abe__aess, */
+ /* &omap44xx_l4_abe__aess_dma, */
&omap44xx_l3_main_2__c2c,
&omap44xx_l4_wkup__counter_32k,
&omap44xx_l4_cfg__ctrl_module_core,
@@ -6132,7 +6136,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_per__uart2,
&omap44xx_l4_per__uart3,
&omap44xx_l4_per__uart4,
- &omap44xx_l4_cfg__usb_host_fs,
+ /* &omap44xx_l4_cfg__usb_host_fs, */
&omap44xx_l4_cfg__usb_host_hs,
&omap44xx_l4_cfg__usb_otg_hs,
&omap44xx_l4_cfg__usb_tll_hs,
diff --git a/arch/arm/mach-omap2/omap_l3_smx.c b/arch/arm/mach-omap2/omap_l3_smx.c
index a05a62f9ee5b..acc216491b8a 100644
--- a/arch/arm/mach-omap2/omap_l3_smx.c
+++ b/arch/arm/mach-omap2/omap_l3_smx.c
@@ -155,10 +155,11 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
u8 multi = error & L3_ERROR_LOG_MULTI;
u32 address = omap3_l3_decode_addr(error_addr);
- WARN(true, "%s seen by %s %s at address %x\n",
+ pr_err("%s seen by %s %s at address %x\n",
omap3_l3_code_string(code),
omap3_l3_initiator_string(initid),
multi ? "Multiple Errors" : "", address);
+ WARN_ON(1);
return IRQ_HANDLED;
}
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index 4c90477e6f82..d52651a05daa 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -239,21 +239,15 @@ void am35x_set_mode(u8 musb_mode)
devconf2 &= ~CONF2_OTGMODE;
switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
case MUSB_HOST: /* Force VBUS valid, ID = 0 */
devconf2 |= CONF2_FORCE_HOST;
break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
devconf2 |= CONF2_FORCE_DEVICE;
break;
-#endif
-#ifdef CONFIG_USB_MUSB_OTG
case MUSB_OTG: /* Don't override the VBUS/ID comparators */
devconf2 |= CONF2_NO_OVERRIDE;
break;
-#endif
default:
pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode);
}
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index a34023d0ca7c..3a595e899724 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -724,6 +724,7 @@ int __init omap3_pm_init(void)
ret = request_irq(omap_prcm_event_to_irq("io"),
_prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
omap3_pm_init);
+ enable_irq(omap_prcm_event_to_irq("io"));
if (ret) {
pr_err("pm: Failed to request pm_io irq\n");
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
index 9ce765407ad5..21cb74003a56 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include "common.h"
#include <plat/cpu.h>
@@ -303,8 +304,15 @@ void omap3xxx_prm_restore_irqen(u32 *saved_mask)
static int __init omap3xxx_prcm_init(void)
{
- if (cpu_is_omap34xx())
- return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
- return 0;
+ int ret = 0;
+
+ if (cpu_is_omap34xx()) {
+ ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
+ if (!ret)
+ irq_set_status_flags(omap_prcm_event_to_irq("io"),
+ IRQ_NOAUTOEN);
+ }
+
+ return ret;
}
subsys_initcall(omap3xxx_prcm_init);
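
Flagging the PRCM "io" interrupt with IRQ_NOAUTOEN here means the later request_irq() in omap3_pm_init() no longer unmasks it implicitly; the PM code enables it explicitly once it is ready. A sketch of that deferred-enable pattern as a code fragment (irq, handler and dev_id assumed to exist; needs <linux/interrupt.h> and <linux/irq.h>):

	irq_set_status_flags(irq, IRQ_NOAUTOEN);	/* at chain-handler setup */

	/* ... later, once the consumer is ready ... */
	ret = request_irq(irq, handler, IRQF_SHARED | IRQF_NO_SUSPEND,
			  "pm_io", dev_id);
	if (!ret)
		enable_irq(irq);			/* unmask only now */
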
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 292d4aaca068..c1b93c752d70 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -57,6 +57,7 @@ struct omap_uart_state {
struct list_head node;
struct omap_hwmod *oh;
+ struct omap_device_pad default_omap_uart_pads[2];
};
static LIST_HEAD(uart_list);
@@ -126,11 +127,70 @@ static void omap_uart_set_smartidle(struct platform_device *pdev) {}
#endif /* CONFIG_PM */
#ifdef CONFIG_OMAP_MUX
-static void omap_serial_fill_default_pads(struct omap_board_data *bdata)
+
+#define OMAP_UART_DEFAULT_PAD_NAME_LEN 28
+static char rx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN],
+ tx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN] __initdata;
+
+static void __init
+omap_serial_fill_uart_tx_rx_pads(struct omap_board_data *bdata,
+ struct omap_uart_state *uart)
+{
+ uart->default_omap_uart_pads[0].name = rx_pad_name;
+ uart->default_omap_uart_pads[0].flags = OMAP_DEVICE_PAD_REMUX |
+ OMAP_DEVICE_PAD_WAKEUP;
+ uart->default_omap_uart_pads[0].enable = OMAP_PIN_INPUT |
+ OMAP_MUX_MODE0;
+ uart->default_omap_uart_pads[0].idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0;
+ uart->default_omap_uart_pads[1].name = tx_pad_name;
+ uart->default_omap_uart_pads[1].enable = OMAP_PIN_OUTPUT |
+ OMAP_MUX_MODE0;
+ bdata->pads = uart->default_omap_uart_pads;
+ bdata->pads_cnt = ARRAY_SIZE(uart->default_omap_uart_pads);
+}
+
+static void __init omap_serial_check_wakeup(struct omap_board_data *bdata,
+ struct omap_uart_state *uart)
{
+ struct omap_mux_partition *tx_partition = NULL, *rx_partition = NULL;
+ struct omap_mux *rx_mux = NULL, *tx_mux = NULL;
+ char *rx_fmt, *tx_fmt;
+ int uart_nr = bdata->id + 1;
+
+ if (bdata->id != 2) {
+ rx_fmt = "uart%d_rx.uart%d_rx";
+ tx_fmt = "uart%d_tx.uart%d_tx";
+ } else {
+ rx_fmt = "uart%d_rx_irrx.uart%d_rx_irrx";
+ tx_fmt = "uart%d_tx_irtx.uart%d_tx_irtx";
+ }
+
+ snprintf(rx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, rx_fmt,
+ uart_nr, uart_nr);
+ snprintf(tx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, tx_fmt,
+ uart_nr, uart_nr);
+
+ if (omap_mux_get_by_name(rx_pad_name, &rx_partition, &rx_mux) >= 0 &&
+ omap_mux_get_by_name
+ (tx_pad_name, &tx_partition, &tx_mux) >= 0) {
+ u16 tx_mode, rx_mode;
+
+ tx_mode = omap_mux_read(tx_partition, tx_mux->reg_offset);
+ rx_mode = omap_mux_read(rx_partition, rx_mux->reg_offset);
+
+ /*
+ * Check whether the UART is used in its default tx/rx mode, i.e. mux
+ * mode0; if so, configure the rx pin for wakeup capability.
+ */
+ if (OMAP_MODE_UART(rx_mode) && OMAP_MODE_UART(tx_mode))
+ omap_serial_fill_uart_tx_rx_pads(bdata, uart);
+ }
}
#else
-static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {}
+static void __init omap_serial_check_wakeup(struct omap_board_data *bdata,
+ struct omap_uart_state *uart)
+{
+}
#endif
static char *cmdline_find_option(char *str)
@@ -287,8 +347,7 @@ void __init omap_serial_board_init(struct omap_uart_port_info *info)
bdata.pads = NULL;
bdata.pads_cnt = 0;
- if (cpu_is_omap44xx() || cpu_is_omap34xx())
- omap_serial_fill_default_pads(&bdata);
+ omap_serial_check_wakeup(&bdata, uart);
if (!info)
omap_serial_init_port(&bdata, NULL);
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 119d5a910f3a..43a979075338 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -32,6 +32,7 @@
#include "twl-common.h"
#include "pm.h"
#include "voltage.h"
+#include "mux.h"
static struct i2c_board_info __initdata pmic_i2c_board_info = {
.addr = 0x48,
@@ -77,6 +78,7 @@ void __init omap4_pmic_init(const char *pmic_type,
struct twl6040_platform_data *twl6040_data, int twl6040_irq)
{
/* PMIC part*/
+ omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
strncpy(omap4_i2c1_board_info[0].type, pmic_type,
sizeof(omap4_i2c1_board_info[0].type));
omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N;
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index b19d1b43c12e..c4a576856661 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -41,12 +41,10 @@ static struct musb_hdrc_config musb_config = {
};
static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_MUSB_OTG
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
.mode = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+#else
.mode = MUSB_HOST,
-#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
- .mode = MUSB_PERIPHERAL,
#endif
/* .clock is set dynamically */
.config = &musb_config,
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index db84a46ce7fd..805bea6edf17 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -300,7 +300,7 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data,
printk(error, 3, status);
return status;
}
- tusb_resources[2].start = irq + IH_GPIO_BASE;
+ tusb_resources[2].start = gpio_to_irq(irq);
/* set up memory timings ... can speed them up later */
if (!ps_refclk) {
diff --git a/arch/arm/mach-orion5x/include/mach/bridge-regs.h b/arch/arm/mach-orion5x/include/mach/bridge-regs.h
index 96484bcd34ca..11a3c1e9801f 100644
--- a/arch/arm/mach-orion5x/include/mach/bridge-regs.h
+++ b/arch/arm/mach-orion5x/include/mach/bridge-regs.h
@@ -35,5 +35,5 @@
#define MAIN_IRQ_MASK (ORION5X_BRIDGE_VIRT_BASE | 0x204)
#define TIMER_VIRT_BASE (ORION5X_BRIDGE_VIRT_BASE | 0x300)
-
+#define TIMER_PHYS_BASE (ORION5X_BRIDGE_PHYS_BASE | 0x300)
#endif
diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h
new file mode 100644
index 000000000000..1aa5d0a50a0b
--- /dev/null
+++ b/arch/arm/mach-orion5x/include/mach/io.h
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/mach-orion5x/include/mach/io.h
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARCH_IO_H
+#define __ASM_ARCH_IO_H
+
+#include <mach/orion5x.h>
+#include <asm/sizes.h>
+
+#define IO_SPACE_LIMIT SZ_2M
+static inline void __iomem *__io(unsigned long addr)
+{
+ return (void __iomem *)(addr + ORION5X_PCIE_IO_VIRT_BASE);
+}
+
+#define __io(a) __io(a)
+#endif
diff --git a/arch/arm/mach-orion5x/include/mach/orion5x.h b/arch/arm/mach-orion5x/include/mach/orion5x.h
index 2745f5d95b3f..683e085ce162 100644
--- a/arch/arm/mach-orion5x/include/mach/orion5x.h
+++ b/arch/arm/mach-orion5x/include/mach/orion5x.h
@@ -82,6 +82,7 @@
#define UART1_VIRT_BASE (ORION5X_DEV_BUS_VIRT_BASE | 0x2100)
#define ORION5X_BRIDGE_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x20000)
+#define ORION5X_BRIDGE_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0x20000)
#define ORION5X_PCI_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x30000)
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index d09da6a746b8..d3de84b0dcbe 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -127,7 +127,11 @@ static unsigned long hx4700_pin_config[] __initdata = {
GPIO19_SSP2_SCLK,
GPIO86_SSP2_RXD,
GPIO87_SSP2_TXD,
- GPIO88_GPIO,
+ GPIO88_GPIO | MFP_LPM_DRIVE_HIGH, /* TSC2046_CS */
+
+ /* BQ24022 Regulator */
+ GPIO72_GPIO | MFP_LPM_KEEP_OUTPUT, /* BQ24022_nCHARGE_EN */
+ GPIO96_GPIO | MFP_LPM_KEEP_OUTPUT, /* BQ24022_ISET2 */
/* HX4700 specific input GPIOs */
GPIO12_GPIO | WAKEUP_ON_EDGE_RISE, /* ASIC3_IRQ */
@@ -135,6 +139,10 @@ static unsigned long hx4700_pin_config[] __initdata = {
GPIO14_GPIO, /* nWLAN_IRQ */
/* HX4700 specific output GPIOs */
+ GPIO61_GPIO | MFP_LPM_DRIVE_HIGH, /* W3220_nRESET */
+ GPIO71_GPIO | MFP_LPM_DRIVE_HIGH, /* ASIC3_nRESET */
+ GPIO81_GPIO | MFP_LPM_DRIVE_HIGH, /* CPU_GP_nRESET */
+ GPIO116_GPIO | MFP_LPM_DRIVE_HIGH, /* CPU_HW_nRESET */
GPIO102_GPIO | MFP_LPM_DRIVE_LOW, /* SYNAPTICS_POWER_ON */
GPIO10_GPIO, /* GSM_IRQ */
@@ -872,14 +880,19 @@ static struct gpio global_gpios[] = {
{ GPIO110_HX4700_LCD_LVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_LVDD" },
{ GPIO111_HX4700_LCD_AVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_AVDD" },
{ GPIO32_HX4700_RS232_ON, GPIOF_OUT_INIT_HIGH, "RS232_ON" },
+ { GPIO61_HX4700_W3220_nRESET, GPIOF_OUT_INIT_HIGH, "W3220_nRESET" },
{ GPIO71_HX4700_ASIC3_nRESET, GPIOF_OUT_INIT_HIGH, "ASIC3_nRESET" },
+ { GPIO81_HX4700_CPU_GP_nRESET, GPIOF_OUT_INIT_HIGH, "CPU_GP_nRESET" },
{ GPIO82_HX4700_EUART_RESET, GPIOF_OUT_INIT_HIGH, "EUART_RESET" },
+ { GPIO116_HX4700_CPU_HW_nRESET, GPIOF_OUT_INIT_HIGH, "CPU_HW_nRESET" },
};
static void __init hx4700_init(void)
{
int ret;
+ PCFR = PCFR_GPR_EN | PCFR_OPDE;
+
pxa2xx_mfp_config(ARRAY_AND_SIZE(hx4700_pin_config));
gpio_set_wake(GPIO12_HX4700_ASIC3_IRQ, 1);
ret = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 414364eb426c..cb2883d553b5 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -106,7 +106,7 @@ static struct clk s3c2440_clk_cam_upll = {
static struct clk s3c2440_clk_ac97 = {
.name = "ac97",
.enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2440_CLKCON_CAMERA,
+ .ctrlbit = S3C2440_CLKCON_AC97,
};
static unsigned long s3c2440_fclk_n_getrate(struct clk *clk)
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 9e37026ef9dd..9bd135531d76 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -779,6 +779,7 @@ DT_MACHINE_START(ARMADILLO800EVA_DT, "armadillo800eva")
.init_irq = r8a7740_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = eva_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
.dt_compat = eva_boards_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kzm9d.c b/arch/arm/mach-shmobile/board-kzm9d.c
index 7bc5e7d39f9b..6a33cf393428 100644
--- a/arch/arm/mach-shmobile/board-kzm9d.c
+++ b/arch/arm/mach-shmobile/board-kzm9d.c
@@ -80,6 +80,7 @@ DT_MACHINE_START(KZM9D_DT, "kzm9d")
.init_irq = emev2_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = kzm9d_add_standard_devices,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
.dt_compat = kzm9d_boards_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index d8e33b682832..c0ae815e7beb 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -455,6 +455,7 @@ DT_MACHINE_START(KZM9G_DT, "kzm9g")
.init_irq = sh73a0_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = kzm_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
.dt_compat = kzm9g_boards_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index b577f7c44678..150122a44630 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1512,6 +1512,9 @@ static void __init mackerel_init(void)
gpio_request(GPIO_FN_SDHID0_1, NULL);
gpio_request(GPIO_FN_SDHID0_0, NULL);
+ /* SDHI0 PORT172 card-detect IRQ26 */
+ gpio_request(GPIO_FN_IRQ26_172, NULL);
+
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
/* enable SDHI1 */
gpio_request(GPIO_FN_SDHICMD1, NULL);
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 472d1f5361e5..3946c4ba2aa8 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -475,9 +475,9 @@ static struct clk *late_main_clks[] = {
enum { MSTP001,
MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
- MSTP219,
+ MSTP219, MSTP218,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
- MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
+ MSTP331, MSTP329, MSTP325, MSTP323,
MSTP314, MSTP313, MSTP312, MSTP311,
MSTP303, MSTP302, MSTP301, MSTP300,
MSTP411, MSTP410, MSTP403,
@@ -497,6 +497,7 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
[MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
[MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
+ [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* SY-DMAC */
[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
[MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -508,7 +509,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
[MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
[MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
- [MSTP318] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 18, 0), /* SY-DMAC */
[MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */
[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
@@ -552,6 +552,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
+ CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
@@ -563,7 +564,6 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
- CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP318]), /* SY-DMAC */
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
diff --git a/arch/arm/mach-shmobile/intc-r8a7779.c b/arch/arm/mach-shmobile/intc-r8a7779.c
index 550b23df4fd4..f04fad4ec4fb 100644
--- a/arch/arm/mach-shmobile/intc-r8a7779.c
+++ b/arch/arm/mach-shmobile/intc-r8a7779.c
@@ -35,6 +35,9 @@
#define INT2SMSKCR3 0xfe7822ac
#define INT2SMSKCR4 0xfe7822b0
+#define INT2NTSR0 0xfe700060
+#define INT2NTSR1 0xfe700064
+
static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
{
return 0; /* always allow wakeup */
@@ -49,6 +52,10 @@ void __init r8a7779_init_irq(void)
gic_init(0, 29, gic_dist_base, gic_cpu_base);
gic_arch_extn.irq_set_wake = r8a7779_set_wake;
+ /* route all interrupts to ARM */
+ __raw_writel(0xffffffff, INT2NTSR0);
+ __raw_writel(0x3fffffff, INT2NTSR1);
+
/* unmask all known interrupts in INTCS2 */
__raw_writel(0xfffffff0, INT2SMSKCR0);
__raw_writel(0xfff7ffff, INT2SMSKCR1);
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index bacdd667e3b1..fde0d23121dc 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -22,10 +22,20 @@
#include <mach/common.h>
#include <mach/emev2.h>
+#ifdef CONFIG_ARCH_SH73A0
#define is_sh73a0() (machine_is_ag5evm() || machine_is_kota2() || \
of_machine_is_compatible("renesas,sh73a0"))
+#else
+#define is_sh73a0() (0)
+#endif
+
#define is_r8a7779() machine_is_marzen()
+
+#ifdef CONFIG_ARCH_EMEV2
#define is_emev2() of_machine_is_compatible("renesas,emev2")
+#else
+#define is_emev2() (0)
+#endif
static unsigned int __init shmobile_smp_get_core_count(void)
{
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 6a4bd582c028..fafce9ce8218 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -484,7 +484,7 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
},
};
-#define SH7372_CHCLR 0x220
+#define SH7372_CHCLR (0x220 - 0x20)
static const struct sh_dmae_channel sh7372_dmae_channels[] = {
{
diff --git a/arch/arm/mach-spear13xx/include/mach/debug-macro.S b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
index ea1564609bd4..9e3ae6bfe50d 100644
--- a/arch/arm/mach-spear13xx/include/mach/debug-macro.S
+++ b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
@@ -4,7 +4,7 @@
* Debugging macro include header spear13xx machine family
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/dma.h b/arch/arm/mach-spear13xx/include/mach/dma.h
index 383ab04dc6c9..d50bdb605925 100644
--- a/arch/arm/mach-spear13xx/include/mach/dma.h
+++ b/arch/arm/mach-spear13xx/include/mach/dma.h
@@ -4,7 +4,7 @@
* DMA information for SPEAr13xx machine family
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/generic.h b/arch/arm/mach-spear13xx/include/mach/generic.h
index 6d8c45b9f298..dac57fd0cdfd 100644
--- a/arch/arm/mach-spear13xx/include/mach/generic.h
+++ b/arch/arm/mach-spear13xx/include/mach/generic.h
@@ -4,7 +4,7 @@
* spear13xx machine family generic header file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/gpio.h b/arch/arm/mach-spear13xx/include/mach/gpio.h
index cd6f4f86a56b..85f176311f63 100644
--- a/arch/arm/mach-spear13xx/include/mach/gpio.h
+++ b/arch/arm/mach-spear13xx/include/mach/gpio.h
@@ -4,7 +4,7 @@
* GPIO macros for SPEAr13xx machine family
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/irqs.h b/arch/arm/mach-spear13xx/include/mach/irqs.h
index f542a24aa5f2..271a62b4cd31 100644
--- a/arch/arm/mach-spear13xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear13xx/include/mach/irqs.h
@@ -4,7 +4,7 @@
* IRQ helper macros for spear13xx machine family
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/spear.h b/arch/arm/mach-spear13xx/include/mach/spear.h
index 30c57ef72686..65f27def239b 100644
--- a/arch/arm/mach-spear13xx/include/mach/spear.h
+++ b/arch/arm/mach-spear13xx/include/mach/spear.h
@@ -4,7 +4,7 @@
* spear13xx Machine family specific definition
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/timex.h b/arch/arm/mach-spear13xx/include/mach/timex.h
index 31af3e8d976e..3a58b8284a6a 100644
--- a/arch/arm/mach-spear13xx/include/mach/timex.h
+++ b/arch/arm/mach-spear13xx/include/mach/timex.h
@@ -4,7 +4,7 @@
* SPEAr3XX machine family specific timex definitions
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/uncompress.h b/arch/arm/mach-spear13xx/include/mach/uncompress.h
index c7840896ae6e..70fe72f05dea 100644
--- a/arch/arm/mach-spear13xx/include/mach/uncompress.h
+++ b/arch/arm/mach-spear13xx/include/mach/uncompress.h
@@ -4,7 +4,7 @@
* Serial port stubs for kernel decompress status messages
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/spear1310.c b/arch/arm/mach-spear13xx/spear1310.c
index fefd15b2f380..732d29bc7330 100644
--- a/arch/arm/mach-spear13xx/spear1310.c
+++ b/arch/arm/mach-spear13xx/spear1310.c
@@ -4,7 +4,7 @@
* SPEAr1310 machine source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/spear1340.c b/arch/arm/mach-spear13xx/spear1340.c
index ee38cbc56869..81e4ed76ad06 100644
--- a/arch/arm/mach-spear13xx/spear1340.c
+++ b/arch/arm/mach-spear13xx/spear1340.c
@@ -4,7 +4,7 @@
* SPEAr1340 machine source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c
index 50b349ae863d..cf936b106e27 100644
--- a/arch/arm/mach-spear13xx/spear13xx.c
+++ b/arch/arm/mach-spear13xx/spear13xx.c
@@ -4,7 +4,7 @@
* SPEAr13XX machines common source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/debug-macro.S b/arch/arm/mach-spear3xx/include/mach/debug-macro.S
index 590519f10d6e..0a6381fad5d9 100644
--- a/arch/arm/mach-spear3xx/include/mach/debug-macro.S
+++ b/arch/arm/mach-spear3xx/include/mach/debug-macro.S
@@ -4,7 +4,7 @@
* Debugging macro include header spear3xx machine family
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h
index 4a95b9453c2a..ce19113ca791 100644
--- a/arch/arm/mach-spear3xx/include/mach/generic.h
+++ b/arch/arm/mach-spear3xx/include/mach/generic.h
@@ -4,7 +4,7 @@
* SPEAr3XX machine family generic header file
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/gpio.h b/arch/arm/mach-spear3xx/include/mach/gpio.h
index 451b2081bfc9..2ac74c6db7f1 100644
--- a/arch/arm/mach-spear3xx/include/mach/gpio.h
+++ b/arch/arm/mach-spear3xx/include/mach/gpio.h
@@ -4,7 +4,7 @@
* GPIO macros for SPEAr3xx machine family
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/irqs.h b/arch/arm/mach-spear3xx/include/mach/irqs.h
index 51bd62a0254c..803de76f5f36 100644
--- a/arch/arm/mach-spear3xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear3xx/include/mach/irqs.h
@@ -4,7 +4,7 @@
* IRQ helper macros for SPEAr3xx machine family
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/misc_regs.h b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
index 18e2ac576f25..6309bf68d6f8 100644
--- a/arch/arm/mach-spear3xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
@@ -4,7 +4,7 @@
* Miscellaneous registers definitions for SPEAr3xx machine family
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/spear.h b/arch/arm/mach-spear3xx/include/mach/spear.h
index 51eb953148a9..8cca95193d4d 100644
--- a/arch/arm/mach-spear3xx/include/mach/spear.h
+++ b/arch/arm/mach-spear3xx/include/mach/spear.h
@@ -4,7 +4,7 @@
* SPEAr3xx Machine family specific definition
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/timex.h b/arch/arm/mach-spear3xx/include/mach/timex.h
index a38cc9de876f..9f5d08bd0c44 100644
--- a/arch/arm/mach-spear3xx/include/mach/timex.h
+++ b/arch/arm/mach-spear3xx/include/mach/timex.h
@@ -4,7 +4,7 @@
* SPEAr3XX machine family specific timex definitions
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/uncompress.h b/arch/arm/mach-spear3xx/include/mach/uncompress.h
index 53ba8bbc0dfa..b909b011f7c8 100644
--- a/arch/arm/mach-spear3xx/include/mach/uncompress.h
+++ b/arch/arm/mach-spear3xx/include/mach/uncompress.h
@@ -4,7 +4,7 @@
* Serial port stubs for kernel decompress status messages
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index f74a05bdb829..0f882ecb7d81 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -4,7 +4,7 @@
* SPEAr300 machine source file
*
* Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index 84dfb0900747..bbcf4571d361 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -4,7 +4,7 @@
* SPEAr310 machine source file
*
* Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index a88fa841d29d..88d483bcd66a 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -4,7 +4,7 @@
* SPEAr320 machine source file
*
* Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index f22419ed74a8..66db5f13af84 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -4,7 +4,7 @@
* SPEAr3XX machines common source file
*
* Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -87,7 +87,7 @@ void __init spear3xx_map_io(void)
static void __init spear3xx_timer_init(void)
{
- char pclk_name[] = "pll3_48m_clk";
+ char pclk_name[] = "pll3_clk";
struct clk *gpt_clk, *pclk;
spear3xx_clk_init();
diff --git a/arch/arm/mach-spear6xx/include/mach/gpio.h b/arch/arm/mach-spear6xx/include/mach/gpio.h
index 3a789dbb69f7..d42cefc0356d 100644
--- a/arch/arm/mach-spear6xx/include/mach/gpio.h
+++ b/arch/arm/mach-spear6xx/include/mach/gpio.h
@@ -4,7 +4,7 @@
* GPIO macros for SPEAr6xx machine family
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear6xx/include/mach/misc_regs.h b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
index 179e45774b3a..c34acc201d34 100644
--- a/arch/arm/mach-spear6xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
@@ -4,7 +4,7 @@
* Miscellaneous registers definitions for SPEAr6xx machine family
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index 2e2e3596583e..9af67d003c62 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -423,7 +423,7 @@ void __init spear6xx_map_io(void)
static void __init spear6xx_timer_init(void)
{
- char pclk_name[] = "pll3_48m_clk";
+ char pclk_name[] = "pll3_clk";
struct clk *gpt_clk, *pclk;
spear6xx_clk_init();
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
index 4d6a2ee99c3b..5beb7ebe2948 100644
--- a/arch/arm/mach-tegra/reset.c
+++ b/arch/arm/mach-tegra/reset.c
@@ -33,7 +33,7 @@
static bool is_enabled;
-static void tegra_cpu_reset_handler_enable(void)
+static void __init tegra_cpu_reset_handler_enable(void)
{
void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE);
void __iomem *evp_cpu_reset =
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 9c74ac545849..4fd93f5c49ec 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -580,43 +580,12 @@ static void ux500_uart0_reset(void)
udelay(1);
}
-/* This needs to be referenced by callbacks */
-struct pinctrl *u0_p;
-struct pinctrl_state *u0_def;
-struct pinctrl_state *u0_sleep;
-
-static void ux500_uart0_init(void)
-{
- int ret;
-
- if (IS_ERR(u0_p) || IS_ERR(u0_def))
- return;
-
- ret = pinctrl_select_state(u0_p, u0_def);
- if (ret)
- pr_err("could not set UART0 defstate\n");
-}
-
-static void ux500_uart0_exit(void)
-{
- int ret;
-
- if (IS_ERR(u0_p) || IS_ERR(u0_sleep))
- return;
-
- ret = pinctrl_select_state(u0_p, u0_sleep);
- if (ret)
- pr_err("could not set UART0 idlestate\n");
-}
-
static struct amba_pl011_data uart0_plat = {
#ifdef CONFIG_STE_DMA40
.dma_filter = stedma40_filter,
.dma_rx_param = &uart0_dma_cfg_rx,
.dma_tx_param = &uart0_dma_cfg_tx,
#endif
- .init = ux500_uart0_init,
- .exit = ux500_uart0_exit,
.reset = ux500_uart0_reset,
};
@@ -638,28 +607,7 @@ static struct amba_pl011_data uart2_plat = {
static void __init mop500_uart_init(struct device *parent)
{
- struct amba_device *uart0_device;
-
- uart0_device = db8500_add_uart0(parent, &uart0_plat);
- if (uart0_device) {
- u0_p = pinctrl_get(&uart0_device->dev);
- if (IS_ERR(u0_p))
- dev_err(&uart0_device->dev,
- "could not get UART0 pinctrl\n");
- else {
- u0_def = pinctrl_lookup_state(u0_p,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(u0_def)) {
- dev_err(&uart0_device->dev,
- "could not get UART0 defstate\n");
- }
- u0_sleep = pinctrl_lookup_state(u0_p,
- PINCTRL_STATE_SLEEP);
- if (IS_ERR(u0_sleep))
- dev_err(&uart0_device->dev,
- "could not get UART0 idlestate\n");
- }
- }
+ db8500_add_uart0(parent, &uart0_plat);
db8500_add_uart1(parent, &uart1_plat);
db8500_add_uart2(parent, &uart2_plat);
}
@@ -677,11 +625,6 @@ static struct platform_device *snowball_platform_devs[] __initdata = {
&ab8500_device,
};
-static struct platform_device *snowball_of_platform_devs[] __initdata = {
- &snowball_led_dev,
- &snowball_key_dev,
-};
-
static void __init mop500_init_machine(void)
{
struct device *parent = NULL;
@@ -821,6 +764,11 @@ MACHINE_END
#ifdef CONFIG_MACH_UX500_DT
+static struct platform_device *snowball_of_platform_devs[] __initdata = {
+ &snowball_led_dev,
+ &snowball_key_dev,
+};
+
struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
/* Requires DMA and call-back bindings. */
OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat),
@@ -838,6 +786,8 @@ struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e000, "gpio.6", NULL),
OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e080, "gpio.7", NULL),
OF_DEV_AUXDATA("st,nomadik-gpio", 0xa03fe000, "gpio.8", NULL),
+ /* Requires device name bindings. */
+ OF_DEV_AUXDATA("stericsson,nmk_pinctrl", 0, "pinctrl-db8500", NULL),
{},
};
diff --git a/arch/arm/mach-ux500/timer.c b/arch/arm/mach-ux500/timer.c
index 741e71feca78..66e7f00884ab 100644
--- a/arch/arm/mach-ux500/timer.c
+++ b/arch/arm/mach-ux500/timer.c
@@ -63,8 +63,10 @@ static void __init ux500_timer_init(void)
/* TODO: Once MTU has been DT:ed place code above into else. */
if (of_have_populated_dt()) {
+#ifdef CONFIG_OF
np = of_find_matching_node(NULL, prcmu_timer_of_match);
if (!np)
+#endif
goto dt_fail;
tmp_base = of_iomap(np, 0);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index cf4687ee2a7b..cd8ea3588f93 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -169,26 +169,13 @@ static struct map_desc versatile_io_desc[] __initdata = {
.pfn = __phys_to_pfn(VERSATILE_PCI_CFG_BASE),
.length = VERSATILE_PCI_CFG_BASE_SIZE,
.type = MT_DEVICE
- },
-#if 0
- {
- .virtual = VERSATILE_PCI_VIRT_MEM_BASE0,
- .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
- .length = SZ_16M,
- .type = MT_DEVICE
}, {
- .virtual = VERSATILE_PCI_VIRT_MEM_BASE1,
- .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE1),
- .length = SZ_16M,
- .type = MT_DEVICE
- }, {
- .virtual = VERSATILE_PCI_VIRT_MEM_BASE2,
- .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE2),
- .length = SZ_16M,
+ .virtual = (unsigned long)VERSATILE_PCI_VIRT_MEM_BASE0,
+ .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
+ .length = IO_SPACE_LIMIT,
.type = MT_DEVICE
},
#endif
-#endif
};
void __init versatile_map_io(void)
diff --git a/arch/arm/mach-versatile/include/mach/hardware.h b/arch/arm/mach-versatile/include/mach/hardware.h
index 4d4973dd8fba..408e58da46c6 100644
--- a/arch/arm/mach-versatile/include/mach/hardware.h
+++ b/arch/arm/mach-versatile/include/mach/hardware.h
@@ -29,8 +29,9 @@
*/
#define VERSATILE_PCI_VIRT_BASE (void __iomem *)0xe8000000ul
#define VERSATILE_PCI_CFG_VIRT_BASE (void __iomem *)0xe9000000ul
+#define VERSATILE_PCI_VIRT_MEM_BASE0 (void __iomem *)PCIO_BASE

-/* macro to get at IO space when running virtually */
+/* macro to get at MMIO space when running virtually */
#define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)
#define __io_address(n) ((void __iomem __force *)IO_ADDRESS(n))
diff --git a/arch/arm/mach-versatile/include/mach/io.h b/arch/arm/mach-versatile/include/mach/io.h
new file mode 100644
index 000000000000..0406513be7d8
--- /dev/null
+++ b/arch/arm/mach-versatile/include/mach/io.h
@@ -0,0 +1,27 @@
+/*
+ * arch/arm/mach-versatile/include/mach/io.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define PCIO_BASE 0xeb000000ul
+
+#define __io(a) ((a) + PCIO_BASE)
+
+#endif
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index 15c6a00000ec..e95bf84cc837 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -169,11 +169,18 @@ static struct pci_ops pci_versatile_ops = {
.write = versatile_write_config,
};
+static struct resource io_port = {
+ .name = "PCI",
+ .start = 0,
+ .end = IO_SPACE_LIMIT,
+ .flags = IORESOURCE_IO,
+};
+
static struct resource io_mem = {
.name = "PCI I/O space",
.start = VERSATILE_PCI_MEM_BASE0,
.end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
- .flags = IORESOURCE_IO,
+ .flags = IORESOURCE_MEM,
};
static struct resource non_mem = {
@@ -200,6 +207,12 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
"memory region (%d)\n", ret);
goto out;
}
+ ret = request_resource(&ioport_resource, &io_port);
+ if (ret) {
+ printk(KERN_ERR "PCI: unable to allocate I/O "
+ "port region (%d)\n", ret);
+ goto out;
+ }
ret = request_resource(&iomem_resource, &non_mem);
if (ret) {
printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
@@ -218,7 +231,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
* the mem resource for this bus
* the prefetch mem resource for this bus
*/
- pci_add_resource_offset(&sys->resources, &io_mem, sys->io_offset);
+ pci_add_resource_offset(&sys->resources, &io_port, sys->io_offset);
pci_add_resource_offset(&sys->resources, &non_mem, sys->mem_offset);
pci_add_resource_offset(&sys->resources, &pre_mem, sys->mem_offset);
@@ -249,6 +262,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
if (nr == 0) {
sys->mem_offset = 0;
+ sys->io_offset = 0;
ret = pci_versatile_setup_resources(sys);
if (ret < 0) {
printk("pci_versatile_setup: resources... oops?\n");
@@ -325,7 +339,6 @@ void __init pci_versatile_preinit(void)
static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
- int devslot = PCI_SLOT(dev->devfn);
/* slot, pin, irq
* 24 1 27
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ea6b43154090..655878bcc96d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -228,7 +228,7 @@ static pte_t **consistent_pte;
#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
-unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
void __init init_consistent_dma_size(unsigned long size)
{
@@ -268,10 +268,8 @@ static int __init consistent_init(void)
unsigned long base = consistent_base;
unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
-#ifndef CONFIG_ARM_DMA_USE_IOMMU
- if (cpu_architecture() >= CPU_ARCH_ARMv6)
+ if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
return 0;
-#endif
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
@@ -323,7 +321,7 @@ static struct arm_vmregion_head coherent_head = {
.vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
};
-size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
static int __init early_coherent_pool(char *p)
{
@@ -342,7 +340,7 @@ static int __init coherent_init(void)
struct page *page;
void *ptr;
- if (cpu_architecture() < CPU_ARCH_ARMv6)
+ if (!IS_ENABLED(CONFIG_CMA))
return 0;
ptr = __alloc_from_contiguous(NULL, size, prot, &page);
@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (arch_is_coherent() || nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
- else if (cpu_architecture() < CPU_ARCH_ARMv6)
+ else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
else if (gfp & GFP_ATOMIC)
addr = __alloc_from_pool(dev, size, &page, caller);
@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
if (arch_is_coherent() || nommu()) {
__dma_free_buffer(page, size);
- } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
+ } else if (!IS_ENABLED(CONFIG_CMA)) {
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
@@ -1069,7 +1067,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
return NULL;
while (count) {
- int j, order = __ffs(count);
+ int j, order = __fls(count);
pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
while (!pages[i] && order)
@@ -1093,7 +1091,7 @@ error:
while (--i)
if (pages[i])
__free_pages(pages[i], 0);
- if (array_size < PAGE_SIZE)
+ if (array_size <= PAGE_SIZE)
kfree(pages);
else
vfree(pages);
@@ -1108,7 +1106,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
for (i = 0; i < count; i++)
if (pages[i])
__free_pages(pages[i], 0);
- if (array_size < PAGE_SIZE)
+ if (array_size <= PAGE_SIZE)
kfree(pages);
else
vfree(pages);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c21d06c7dd7e..f54d59219764 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
* allocations. This must be the smallest DMA mask in the system,
* so a successful GFP_DMA allocation will always satisfy this.
*/
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
unsigned long dma_size)
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 93dc0c17cdcb..2e8a1efdf7b8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -62,9 +62,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#endif
#ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
#else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
#endif
extern phys_addr_t arm_lowmem_limit;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e5dad60b558b..cf4528d51774 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
}
}
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h). However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings. This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+ struct vm_struct *vm;
+
+ vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+ vm->addr = (void *)addr;
+ vm->size = SECTION_SIZE;
+ vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+ vm->caller = pmd_empty_section_gap;
+ vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+ struct vm_struct *vm;
+ unsigned long addr, next = 0;
+ pmd_t *pmd;
+
+ /* we're still single threaded hence no lock needed here */
+ for (vm = vmlist; vm; vm = vm->next) {
+ if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+ continue;
+ addr = (unsigned long)vm->addr;
+ if (addr < next)
+ continue;
+
+ /*
+ * Check if this vm starts on an odd section boundary.
+ * If so and the first section entry for this PMD is free
+ * then we block the corresponding virtual address.
+ */
+ if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+ pmd = pmd_off_k(addr);
+ if (pmd_none(*pmd))
+ pmd_empty_section_gap(addr & PMD_MASK);
+ }
+
+ /*
+ * Then check if this vm ends on an odd section boundary.
+ * If so and the second section entry for this PMD is empty
+ * then we block the corresponding virtual address.
+ */
+ addr += vm->size;
+ if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+ pmd = pmd_off_k(addr) + 1;
+ if (pmd_none(*pmd))
+ pmd_empty_section_gap(addr);
+ }
+
+ /* no need to look at any vm entry until we hit the next PMD */
+ next = (addr + PMD_SIZE - 1) & PMD_MASK;
+ }
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
static void * __initdata vmalloc_min =
(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
*/
if (mdesc->map_io)
mdesc->map_io();
+ fill_pmd_gaps();
/*
* Finally flush the caches and tlb to ensure that we're in a
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 62135849f48b..c641fb685017 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -762,6 +762,11 @@ b_epilogue:
update_on_xread(ctx);
emit(ARM_MOV_R(r_A, r_X), ctx);
break;
+ case BPF_S_ANC_ALU_XOR_X:
+ /* A ^= X */
+ update_on_xread(ctx);
+ emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
+ break;
case BPF_S_ANC_PROTOCOL:
/* A = ntohs(skb->protocol) */
ctx->seen |= SEEN_SKB;
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index 99ae5e3f46d2..7fa2f7d3cb90 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -68,6 +68,8 @@
#define ARM_INST_CMP_R 0x01500000
#define ARM_INST_CMP_I 0x03500000
+#define ARM_INST_EOR_R 0x00200000
+
#define ARM_INST_LDRB_I 0x05d00000
#define ARM_INST_LDRB_R 0x07d00000
#define ARM_INST_LDRH_I 0x01d000b0
@@ -132,6 +134,8 @@
#define ARM_CMP_R(rn, rm) _AL3_R(ARM_INST_CMP, 0, rn, rm)
#define ARM_CMP_I(rn, imm) _AL3_I(ARM_INST_CMP, 0, rn, imm)
+#define ARM_EOR_R(rd, rn, rm) _AL3_R(ARM_INST_EOR, rd, rn, rm)
+
#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
| (off))
#define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
diff --git a/arch/arm/plat-mxc/epit.c b/arch/arm/plat-mxc/epit.c
index 9129c9e7d532..88726f4dbbfa 100644
--- a/arch/arm/plat-mxc/epit.c
+++ b/arch/arm/plat-mxc/epit.c
@@ -50,6 +50,7 @@
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <mach/hardware.h>
#include <asm/mach/time.h>
@@ -201,8 +202,16 @@ static int __init epit_clockevent_init(struct clk *timer_clk)
return 0;
}
-void __init epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
+void __init epit_timer_init(void __iomem *base, int irq)
{
+ struct clk *timer_clk;
+
+ timer_clk = clk_get_sys("imx-epit.0", NULL);
+ if (IS_ERR(timer_clk)) {
+ pr_err("i.MX epit: unable to get clk\n");
+ return;
+ }
+
clk_prepare_enable(timer_clk);
timer_base = base;
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index cf663d84e7c1..e429ca1b814a 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -54,8 +54,8 @@ extern void imx50_soc_init(void);
extern void imx51_soc_init(void);
extern void imx53_soc_init(void);
extern void imx51_init_late(void);
-extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
-extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
+extern void epit_timer_init(void __iomem *base, int irq);
+extern void mxc_timer_init(void __iomem *, int);
extern int mx1_clocks_init(unsigned long fref);
extern int mx21_clocks_init(unsigned long lref, unsigned long fref);
extern int mx25_clocks_init(void);
diff --git a/arch/arm/plat-mxc/include/mach/mx2_cam.h b/arch/arm/plat-mxc/include/mach/mx2_cam.h
index 7ded6f1f74bc..3c080a32dbf5 100644
--- a/arch/arm/plat-mxc/include/mach/mx2_cam.h
+++ b/arch/arm/plat-mxc/include/mach/mx2_cam.h
@@ -23,6 +23,7 @@
#ifndef __MACH_MX2_CAM_H_
#define __MACH_MX2_CAM_H_
+#define MX2_CAMERA_SWAP16 (1 << 0)
#define MX2_CAMERA_EXT_VSYNC (1 << 1)
#define MX2_CAMERA_CCIR (1 << 2)
#define MX2_CAMERA_CCIR_INTERLACE (1 << 3)
@@ -30,6 +31,7 @@
#define MX2_CAMERA_GATED_CLOCK (1 << 5)
#define MX2_CAMERA_INV_DATA (1 << 6)
#define MX2_CAMERA_PCLK_SAMPLE_RISING (1 << 7)
+#define MX2_CAMERA_PACK_DIR_MSB (1 << 8)
/**
* struct mx2_camera_platform_data - optional platform data for mx2_camera
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index 99f958ca6cb8..00e8e659e667 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -58,6 +58,7 @@
/* MX31, MX35, MX25, MX5 */
#define V2_TCTL_WAITEN (1 << 3) /* Wait enable mode */
#define V2_TCTL_CLK_IPG (1 << 6)
+#define V2_TCTL_CLK_PER (2 << 6)
#define V2_TCTL_FRR (1 << 9)
#define V2_IR 0x0c
#define V2_TSTAT 0x08
@@ -280,23 +281,22 @@ static int __init mxc_clockevent_init(struct clk *timer_clk)
return 0;
}
-void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
+void __init mxc_timer_init(void __iomem *base, int irq)
{
uint32_t tctl_val;
+ struct clk *timer_clk;
struct clk *timer_ipg_clk;
- if (!timer_clk) {
- timer_clk = clk_get_sys("imx-gpt.0", "per");
- if (IS_ERR(timer_clk)) {
- pr_err("i.MX timer: unable to get clk\n");
- return;
- }
-
- timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
- if (!IS_ERR(timer_ipg_clk))
- clk_prepare_enable(timer_ipg_clk);
+ timer_clk = clk_get_sys("imx-gpt.0", "per");
+ if (IS_ERR(timer_clk)) {
+ pr_err("i.MX timer: unable to get clk\n");
+ return;
}
+ timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
+ if (!IS_ERR(timer_ipg_clk))
+ clk_prepare_enable(timer_ipg_clk);
+
clk_prepare_enable(timer_clk);
timer_base = base;
@@ -309,7 +309,7 @@ void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
__raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */
if (timer_is_v2())
- tctl_val = V2_TCTL_CLK_IPG | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
+ tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
else
tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 62ec5c452792..706b7e29397f 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -461,6 +461,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused)
struct clk *c;
struct clk *pa;
+ mutex_lock(&clocks_mutex);
seq_printf(s, "%-30s %-30s %-10s %s\n",
"clock-name", "parent-name", "rate", "use-count");
@@ -469,6 +470,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused)
seq_printf(s, "%-30s %-30s %-10lu %d\n",
c->name, pa ? pa->name : "none", c->rate, c->usecount);
}
+ mutex_unlock(&clocks_mutex);
return 0;
}
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 297245dba66e..de6c0a08f461 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -252,8 +252,6 @@ IS_AM_SUBCLASS(335x, 0x335)
* cpu_is_omap2423(): True for OMAP2423
* cpu_is_omap2430(): True for OMAP2430
* cpu_is_omap3430(): True for OMAP3430
- * cpu_is_omap3505(): True for OMAP3505
- * cpu_is_omap3517(): True for OMAP3517
*/
#define GET_OMAP_TYPE ((omap_rev() >> 16) & 0xffff)
@@ -277,8 +275,6 @@ IS_OMAP_TYPE(2422, 0x2422)
IS_OMAP_TYPE(2423, 0x2423)
IS_OMAP_TYPE(2430, 0x2430)
IS_OMAP_TYPE(3430, 0x3430)
-IS_OMAP_TYPE(3505, 0x3517)
-IS_OMAP_TYPE(3517, 0x3517)
#define cpu_is_omap310() 0
#define cpu_is_omap730() 0
@@ -293,12 +289,6 @@ IS_OMAP_TYPE(3517, 0x3517)
#define cpu_is_omap2422() 0
#define cpu_is_omap2423() 0
#define cpu_is_omap2430() 0
-#define cpu_is_omap3503() 0
-#define cpu_is_omap3515() 0
-#define cpu_is_omap3525() 0
-#define cpu_is_omap3530() 0
-#define cpu_is_omap3505() 0
-#define cpu_is_omap3517() 0
#define cpu_is_omap3430() 0
#define cpu_is_omap3630() 0
@@ -350,12 +340,6 @@ IS_OMAP_TYPE(3517, 0x3517)
#if defined(CONFIG_ARCH_OMAP3)
# undef cpu_is_omap3430
-# undef cpu_is_omap3503
-# undef cpu_is_omap3515
-# undef cpu_is_omap3525
-# undef cpu_is_omap3530
-# undef cpu_is_omap3505
-# undef cpu_is_omap3517
# undef cpu_is_ti81xx
# undef cpu_is_ti816x
# undef cpu_is_ti814x
@@ -363,19 +347,6 @@ IS_OMAP_TYPE(3517, 0x3517)
# undef cpu_is_am33xx
# undef cpu_is_am335x
# define cpu_is_omap3430() is_omap3430()
-# define cpu_is_omap3503() (cpu_is_omap3430() && \
- (!omap3_has_iva()) && \
- (!omap3_has_sgx()))
-# define cpu_is_omap3515() (cpu_is_omap3430() && \
- (!omap3_has_iva()) && \
- (omap3_has_sgx()))
-# define cpu_is_omap3525() (cpu_is_omap3430() && \
- (!omap3_has_sgx()) && \
- (omap3_has_iva()))
-# define cpu_is_omap3530() (cpu_is_omap3430())
-# define cpu_is_omap3517() is_omap3517()
-# define cpu_is_omap3505() (cpu_is_omap3517() && \
- !omap3_has_sgx())
# undef cpu_is_omap3630
# define cpu_is_omap3630() is_omap363x()
# define cpu_is_ti81xx() is_ti81xx()
@@ -424,10 +395,6 @@ IS_OMAP_TYPE(3517, 0x3517)
#define OMAP3630_REV_ES1_1 (OMAP363X_CLASS | (0x1 << 8))
#define OMAP3630_REV_ES1_2 (OMAP363X_CLASS | (0x2 << 8))
-#define OMAP3517_CLASS 0x35170034
-#define OMAP3517_REV_ES1_0 OMAP3517_CLASS
-#define OMAP3517_REV_ES1_1 (OMAP3517_CLASS | (0x1 << 8))
-
#define TI816X_CLASS 0x81600034
#define TI8168_REV_ES1_0 TI816X_CLASS
#define TI8168_REV_ES1_1 (TI816X_CLASS | (0x1 << 8))
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index a7754a886d42..5493bd95da5e 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -172,8 +172,7 @@ struct omap_mmc_platform_data {
extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
int is_closed);
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
- defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
+#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
int nr_controllers);
void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
@@ -185,7 +184,6 @@ static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
{
}
-
#endif
extern int omap_msdi_reset(struct omap_hwmod *oh);
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 61fd837624a8..c1793786aea9 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -582,7 +582,7 @@ void __init orion_spi_1_init(unsigned long mapbase)
* Watchdog
****************************************************************************/
static struct resource orion_wdt_resource =
- DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28);
+ DEFINE_RES_MEM(TIMER_PHYS_BASE, 0x28);
static struct platform_device orion_wdt_device = {
.name = "orion_wdt",
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index 58b79809d20c..584c9bf8ed2d 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -193,6 +193,7 @@ static const struct platform_device_id ssp_id_table[] = {
{ "pxa25x-nssp", PXA25x_NSSP },
{ "pxa27x-ssp", PXA27x_SSP },
{ "pxa168-ssp", PXA168_SSP },
+ { "pxa910-ssp", PXA910_SSP },
{ },
};
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 33ecd0c9f0c3..b1e05ccff3ac 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
return -EINVAL;
}
- if (client->is_ts && adc->ts_pend)
- return -EAGAIN;
-
spin_lock_irqsave(&adc->lock, flags);
+ if (client->is_ts && adc->ts_pend) {
+ spin_unlock_irqrestore(&adc->lock, flags);
+ return -EAGAIN;
+ }
+
client->channel = channel;
client->nr_samples = nr_samples;
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 1d214cb9d770..6303974c2ee0 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -126,7 +126,8 @@ struct platform_device s3c_device_adc = {
#ifdef CONFIG_CPU_S3C2440
static struct resource s3c_camif_resource[] = {
[0] = DEFINE_RES_MEM(S3C2440_PA_CAMIF, S3C2440_SZ_CAMIF),
- [1] = DEFINE_RES_IRQ(IRQ_CAM),
+ [1] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_C),
+ [2] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_P),
};
struct platform_device s3c_device_camif = {
diff --git a/arch/arm/plat-samsung/include/plat/map-s3c.h b/arch/arm/plat-samsung/include/plat/map-s3c.h
index 7d048759b772..c0c70a895ca8 100644
--- a/arch/arm/plat-samsung/include/plat/map-s3c.h
+++ b/arch/arm/plat-samsung/include/plat/map-s3c.h
@@ -22,7 +22,7 @@
#define S3C24XX_VA_WATCHDOG S3C_VA_WATCHDOG
#define S3C2412_VA_SSMC S3C_ADDR_CPU(0x00000000)
-#define S3C2412_VA_EBI S3C_ADDR_CPU(0x00010000)
+#define S3C2412_VA_EBI S3C_ADDR_CPU(0x00100000)
#define S3C2410_PA_UART (0x50000000)
#define S3C24XX_PA_UART S3C2410_PA_UART
diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
index f19aff19205c..bc4db9b04e36 100644
--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
@@ -25,7 +25,7 @@ static inline void arch_wdt_reset(void)
__raw_writel(0, S3C2410_WTCON); /* disable watchdog, to be safe */
- if (s3c2410_wdtclk)
+ if (!IS_ERR(s3c2410_wdtclk))
clk_enable(s3c2410_wdtclk);
/* put initial values into count and data */
diff --git a/arch/arm/plat-samsung/s5p-clock.c b/arch/arm/plat-samsung/s5p-clock.c
index 031a61899bef..48a159911037 100644
--- a/arch/arm/plat-samsung/s5p-clock.c
+++ b/arch/arm/plat-samsung/s5p-clock.c
@@ -37,6 +37,7 @@ struct clk clk_ext_xtal_mux = {
struct clk clk_xusbxti = {
.name = "xusbxti",
.id = -1,
+ .rate = 24000000,
};
struct clk s5p_clk_27m = {
diff --git a/arch/arm/plat-spear/include/plat/debug-macro.S b/arch/arm/plat-spear/include/plat/debug-macro.S
index ab3de721c5db..75b05ad0fbad 100644
--- a/arch/arm/plat-spear/include/plat/debug-macro.S
+++ b/arch/arm/plat-spear/include/plat/debug-macro.S
@@ -4,7 +4,7 @@
* Debugging macro include header for spear platform
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/pl080.h b/arch/arm/plat-spear/include/plat/pl080.h
index e14a3e4932f9..2bc6b54460a8 100644
--- a/arch/arm/plat-spear/include/plat/pl080.h
+++ b/arch/arm/plat-spear/include/plat/pl080.h
@@ -4,7 +4,7 @@
* DMAC pl080 definitions for SPEAr platform
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/shirq.h b/arch/arm/plat-spear/include/plat/shirq.h
index 03ed8b585dcf..88a7fbd24793 100644
--- a/arch/arm/plat-spear/include/plat/shirq.h
+++ b/arch/arm/plat-spear/include/plat/shirq.h
@@ -4,7 +4,7 @@
* SPEAr platform shared irq layer header file
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/timex.h b/arch/arm/plat-spear/include/plat/timex.h
index 914d09dd50fd..ef95e5b780bd 100644
--- a/arch/arm/plat-spear/include/plat/timex.h
+++ b/arch/arm/plat-spear/include/plat/timex.h
@@ -4,7 +4,7 @@
* SPEAr platform specific timex definitions
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h
index 6dd455bafdfd..2ce6cb17a98b 100644
--- a/arch/arm/plat-spear/include/plat/uncompress.h
+++ b/arch/arm/plat-spear/include/plat/uncompress.h
@@ -4,7 +4,7 @@
* Serial port stubs for kernel decompress status messages
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/pl080.c b/arch/arm/plat-spear/pl080.c
index a56a067717c1..12cf27f935f9 100644
--- a/arch/arm/plat-spear/pl080.c
+++ b/arch/arm/plat-spear/pl080.c
@@ -4,7 +4,7 @@
* DMAC pl080 definitions for SPEAr platform
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/restart.c b/arch/arm/plat-spear/restart.c
index ea0a61302b7e..4f990115b1bd 100644
--- a/arch/arm/plat-spear/restart.c
+++ b/arch/arm/plat-spear/restart.c
@@ -4,7 +4,7 @@
* SPEAr platform specific restart functions
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/shirq.c b/arch/arm/plat-spear/shirq.c
index 961fb7261243..853e891e1184 100644
--- a/arch/arm/plat-spear/shirq.c
+++ b/arch/arm/plat-spear/shirq.c
@@ -4,7 +4,7 @@
* SPEAr platform shared irq layer source file
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 2e3994b20169..62bcea7dcc6d 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
unsigned long newsp;
#ifdef __ARCH_SYNC_CORE_DCACHE
- if (current->rt.nr_cpus_allowed == num_possible_cpus())
+ if (current->nr_cpus_allowed == num_possible_cpus())
set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
#endif
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
index a09230a08e02..62ef17676b40 100644
--- a/arch/h8300/include/asm/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
@@ -70,4 +70,7 @@ extern int is_in_rom(unsigned long);
#define VMALLOC_END 0xffffffff
#define arch_enter_lazy_cpu_mode() do {} while (0)
+
+#include <asm-generic/pgtable.h>
+
#endif /* _H8300_PGTABLE_H */
diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h
index 356068cd0879..8725d1ad4272 100644
--- a/arch/h8300/include/asm/uaccess.h
+++ b/arch/h8300/include/asm/uaccess.h
@@ -100,7 +100,6 @@ extern int __put_user_bad(void);
break; \
default: \
__gu_err = __get_user_bad(); \
- __gu_val = 0; \
break; \
} \
(x) = __gu_val; \
@@ -159,4 +158,6 @@ clear_user(void *to, unsigned long n)
return 0;
}
+#define __clear_user clear_user
+
#endif /* _H8300_UACCESS_H */
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 68d651081bd3..d0b1607f2711 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -35,6 +35,7 @@
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
+#include <asm/sections.h>
#if defined(__H8300H__)
#define CPU "H8/300H"
@@ -54,7 +55,6 @@ unsigned long memory_end;
char __initdata command_line[COMMAND_LINE_SIZE];
-extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
extern int _ramstart, _ramend;
extern char _target_name[];
extern void h8300_gpio_init(void);
@@ -119,9 +119,9 @@ void __init setup_arch(char **cmdline_p)
memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS;
#endif
- init_mm.start_code = (unsigned long) &_stext;
- init_mm.end_code = (unsigned long) &_etext;
- init_mm.end_data = (unsigned long) &_edata;
+ init_mm.start_code = (unsigned long) _stext;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (unsigned long) 0;
#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT)
@@ -134,15 +134,12 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n");
#ifdef DEBUG
- printk(KERN_DEBUG "KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
- "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
- (int) &_sdata, (int) &_edata,
- (int) &_sbss, (int) &_ebss);
- printk(KERN_DEBUG "KERNEL -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x "
- "STACK=0x%06x-0x%06x\n",
- (int) &_ebss, (int) memory_start,
- (int) memory_start, (int) memory_end,
- (int) memory_end, (int) &_ramend);
+ printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p "
+ "BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start,
+ __bss_stop);
+ printk(KERN_DEBUG "KERNEL -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx "
+ "STACK=0x%06lx-0x%p\n", __bss_stop, memory_start, memory_start,
+ memory_end, memory_end, &_ramend);
#endif
#ifdef CONFIG_DEFAULT_CMDLINE
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index fca10378701b..5adaadaf9218 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -447,7 +447,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-statis void do_signal(struct pt_regs *regs)
+static void do_signal(struct pt_regs *regs)
{
siginfo_t info;
int signr;
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index 32263a138aa6..e0f74191d553 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -27,6 +27,7 @@
#include <linux/profile.h>
#include <asm/io.h>
+#include <asm/irq_regs.h>
#include <asm/timer.h>
#define TICK_SIZE (tick_nsec / 1000)
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 973369c32a95..981e25094b1a 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -36,6 +36,7 @@
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/sections.h>
#undef DEBUG
@@ -123,7 +124,6 @@ void __init mem_init(void)
int codek = 0, datak = 0, initk = 0;
/* DAVIDM look at setup memory map generically with reserved area */
unsigned long tmp;
- extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end;
extern unsigned long _ramend, _ramstart;
unsigned long len = &_ramend - &_ramstart;
unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
@@ -142,9 +142,9 @@ void __init mem_init(void)
/* this will put all memory onto the freelists */
totalram_pages = free_all_bootmem();
- codek = (&_etext - &_stext) >> 10;
- datak = (&_ebss - &_sdata) >> 10;
- initk = (&__init_begin - &__init_end) >> 10;
+ codek = (_etext - _stext) >> 10;
+ datak = (__bss_stop - _sdata) >> 10;
+ initk = (__init_begin - __init_end) >> 10;
tmp = nr_free_pages() << PAGE_SHIFT;
printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n",
@@ -178,22 +178,21 @@ free_initmem(void)
{
#ifdef CONFIG_RAMKERNEL
unsigned long addr;
- extern char __init_begin, __init_end;
/*
* the following code should be cool even if these sections
* are not page aligned.
*/
- addr = PAGE_ALIGN((unsigned long)(&__init_begin));
+ addr = PAGE_ALIGN((unsigned long)(__init_begin));
/* next to check that the page we free is not a partial page */
- for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {
+ for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
- (addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
- (int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
+ (addr - PAGE_ALIGN((long) __init_begin)) >> 10,
+ (int)(PAGE_ALIGN((unsigned long)__init_begin)),
(int)(addr - PAGE_SIZE));
#endif
}
diff --git a/arch/m32r/boot/compressed/Makefile b/arch/m32r/boot/compressed/Makefile
index 177716b1d613..01729c2979ba 100644
--- a/arch/m32r/boot/compressed/Makefile
+++ b/arch/m32r/boot/compressed/Makefile
@@ -43,9 +43,9 @@ endif
OBJCOPYFLAGS += -R .empty_zero_page
-suffix_$(CONFIG_KERNEL_GZIP) = gz
-suffix_$(CONFIG_KERNEL_BZIP2) = bz2
-suffix_$(CONFIG_KERNEL_LZMA) = lzma
+suffix-$(CONFIG_KERNEL_GZIP) = gz
+suffix-$(CONFIG_KERNEL_BZIP2) = bz2
+suffix-$(CONFIG_KERNEL_LZMA) = lzma
$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
$(call if_changed,ld)
diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
index 370d60881977..28a09529f206 100644
--- a/arch/m32r/boot/compressed/misc.c
+++ b/arch/m32r/boot/compressed/misc.c
@@ -28,7 +28,7 @@ static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;
#ifdef CONFIG_KERNEL_BZIP2
-static void *memset(void *s, int c, size_t n)
+void *memset(void *s, int c, size_t n)
{
char *ss = s;
@@ -39,6 +39,16 @@ static void *memset(void *s, int c, size_t n)
#endif
#ifdef CONFIG_KERNEL_GZIP
+void *memcpy(void *dest, const void *src, size_t n)
+{
+ char *d = dest;
+ const char *s = src;
+ while (n--)
+ *d++ = *s++;
+
+ return dest;
+}
+
#define BOOT_HEAP_SIZE 0x10000
#include "../../../../lib/decompress_inflate.c"
#endif
diff --git a/arch/m32r/include/asm/ptrace.h b/arch/m32r/include/asm/ptrace.h
index 527527584dd0..4313aa62b51b 100644
--- a/arch/m32r/include/asm/ptrace.h
+++ b/arch/m32r/include/asm/ptrace.h
@@ -113,9 +113,6 @@ struct pt_regs {
#define PTRACE_OLDSETOPTIONS 21
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD 0x00000001
-
#ifdef __KERNEL__
#include <asm/m32r.h> /* M32R_PSW_BSM, M32R_PSW_BPM */
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 4c03361537aa..51f5e9aa4901 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -591,17 +591,16 @@ void user_enable_single_step(struct task_struct *child)
if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
!= sizeof(insn))
- return -EIO;
+ return;
compute_next_pc(insn, pc, &next_pc, child);
if (next_pc & 0x80000000)
- return -EIO;
+ return;
if (embed_debug_trap(child, next_pc))
- return -EIO;
+ return;
invalidate_cache();
- return 0;
}
void user_disable_single_step(struct task_struct *child)
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index f3fb2c029cfc..d0f60b97bbc5 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -286,7 +286,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
case -ERESTARTNOINTR:
regs->r0 = regs->orig_r0;
if (prev_insn(regs) < 0)
- return -EFAULT;
+ return;
}
}
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index cac5b6be572a..147120128260 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -7,6 +7,8 @@ config M68K
select GENERIC_IRQ_SHOW
select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select GENERIC_CPU_DEVICES
+ select GENERIC_STRNCPY_FROM_USER if MMU
+ select GENERIC_STRNLEN_USER if MMU
select FPU if MMU
select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 1a922fad76f7..eafa2539a8ee 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,2 +1,4 @@
include include/asm-generic/Kbuild.asm
header-y += cachectl.h
+
+generic-y += word-at-a-time.h
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index d63b99ff7ff7..497c31c803ff 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -86,7 +86,7 @@
/*
* QSPI module.
*/
-#define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340)
+#define MCFQSPI_BASE (MCF_IPSBAR + 0x340)
#define MCFQSPI_SIZE 0x40
#define MCFQSPI_CS0 147
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 9c80cd515b20..472c891a4aee 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -379,12 +379,15 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
#define copy_from_user(to, from, n) __copy_from_user(to, from, n)
#define copy_to_user(to, from, n) __copy_to_user(to, from, n)
-long strncpy_from_user(char *dst, const char __user *src, long count);
-long strnlen_user(const char __user *src, long n);
+#define user_addr_max() \
+ (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
unsigned long __clear_user(void __user *to, unsigned long n);
#define clear_user __clear_user
-#define strlen_user(str) strnlen_user(str, 32767)
-
#endif /* _M68K_UACCESS_H */
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 8b4a2222e658..1bc10e62b9af 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -286,7 +286,7 @@ asmlinkage void syscall_trace(void)
}
}
-#ifdef CONFIG_COLDFIRE
+#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
asmlinkage int syscall_trace_enter(void)
{
int ret = 0;
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index d7deb7fc7eb5..707f0573ec6b 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -85,7 +85,7 @@ void __init time_init(void)
mach_sched_init(timer_interrupt);
}
-#ifdef CONFIG_M68KCLASSIC
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
u32 arch_gettimeoffset(void)
{
@@ -108,4 +108,4 @@ static int __init rtc_init(void)
module_init(rtc_init);
-#endif /* CONFIG_M68KCLASSIC */
+#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
index 5664386338da..5e97f2ee7c11 100644
--- a/arch/m68k/lib/uaccess.c
+++ b/arch/m68k/lib/uaccess.c
@@ -104,80 +104,6 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
EXPORT_SYMBOL(__generic_copy_to_user);
/*
- * Copy a null terminated string from userspace.
- */
-long strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res;
- char c;
-
- if (count <= 0)
- return count;
-
- asm volatile ("\n"
- "1: "MOVES".b (%2)+,%4\n"
- " move.b %4,(%1)+\n"
- " jeq 2f\n"
- " subq.l #1,%3\n"
- " jne 1b\n"
- "2: sub.l %3,%0\n"
- "3:\n"
- " .section .fixup,\"ax\"\n"
- " .even\n"
- "10: move.l %5,%0\n"
- " jra 3b\n"
- " .previous\n"
- "\n"
- " .section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,10b\n"
- " .previous"
- : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c)
- : "i" (-EFAULT), "0" (count));
-
- return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-long strnlen_user(const char __user *src, long n)
-{
- char c;
- long res;
-
- asm volatile ("\n"
- "1: subq.l #1,%1\n"
- " jmi 3f\n"
- "2: "MOVES".b (%0)+,%2\n"
- " tst.b %2\n"
- " jne 1b\n"
- " jra 4f\n"
- "\n"
- "3: addq.l #1,%0\n"
- "4: sub.l %4,%0\n"
- "5:\n"
- " .section .fixup,\"ax\"\n"
- " .even\n"
- "20: sub.l %0,%0\n"
- " jra 5b\n"
- " .previous\n"
- "\n"
- " .section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 2b,20b\n"
- " .previous\n"
- : "=&a" (res), "+d" (n), "=&d" (c)
- : "0" (src), "r" (src));
-
- return res;
-}
-EXPORT_SYMBOL(strnlen_user);
-
-/*
* Zero Userspace
*/
diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c
index c801c172b822..f4dc9b295609 100644
--- a/arch/m68k/platform/68328/timers.c
+++ b/arch/m68k/platform/68328/timers.c
@@ -53,6 +53,7 @@
#endif
static u32 m68328_tick_cnt;
+static irq_handler_t timer_interrupt;
/***************************************************************************/
@@ -62,7 +63,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
TSTAT &= 0;
m68328_tick_cnt += TICKS_PER_JIFFY;
- return arch_timer_interrupt(irq, dummy);
+ return timer_interrupt(irq, dummy);
}
/***************************************************************************/
@@ -99,7 +100,7 @@ static struct clocksource m68328_clk = {
/***************************************************************************/
-void hw_timer_init(void)
+void hw_timer_init(irq_handler_t handler)
{
/* disable timer 1 */
TCTL = 0;
@@ -115,6 +116,7 @@ void hw_timer_init(void)
/* Enable timer 1 */
TCTL |= TCTL_TEN;
clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ);
+ timer_interrupt = handler;
}
/***************************************************************************/
diff --git a/arch/m68k/platform/68360/config.c b/arch/m68k/platform/68360/config.c
index 255fc03913e9..9877cefad1e7 100644
--- a/arch/m68k/platform/68360/config.c
+++ b/arch/m68k/platform/68360/config.c
@@ -35,6 +35,7 @@ extern void m360_cpm_reset(void);
#define OSCILLATOR (unsigned long int)33000000
#endif
+static irq_handler_t timer_interrupt;
unsigned long int system_clock;
extern QUICC *pquicc;
@@ -52,7 +53,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
pquicc->timer_ter1 = 0x0002; /* clear timer event */
- return arch_timer_interrupt(irq, dummy);
+ return timer_interrupt(irq, dummy);
}
static struct irqaction m68360_timer_irq = {
@@ -61,7 +62,7 @@ static struct irqaction m68360_timer_irq = {
.handler = hw_tick,
};
-void hw_timer_init(void)
+void hw_timer_init(irq_handler_t handler)
{
unsigned char prescaler;
unsigned short tgcr_save;
@@ -94,6 +95,8 @@ void hw_timer_init(void)
pquicc->timer_ter1 = 0x0003; /* clear timer events */
+ timer_interrupt = handler;
+
/* enable timer 1 interrupt in CIMR */
setup_irq(CPMVEC_TIMER1, &m68360_timer_irq);
diff --git a/arch/m68k/platform/coldfire/clk.c b/arch/m68k/platform/coldfire/clk.c
index 9f1260c5e2ad..44da406897e5 100644
--- a/arch/m68k/platform/coldfire/clk.c
+++ b/arch/m68k/platform/coldfire/clk.c
@@ -42,4 +42,11 @@ unsigned long clk_get_rate(struct clk *clk)
return MCF_CLK;
}
EXPORT_SYMBOL(clk_get_rate);
+
+struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(devm_clk_get);
+
/***************************************************************************/
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 09ab87ee6fef..b3e10fdd3898 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -288,6 +288,7 @@ config MIPS_MALTA
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
select SYS_HAS_CPU_MIPS64_R1
+ select SYS_HAS_CPU_MIPS64_R2
select SYS_HAS_CPU_NEVADA
select SYS_HAS_CPU_RM7000
select SYS_HAS_EARLY_PRINTK
@@ -1423,6 +1424,7 @@ config CPU_SB1
config CPU_CAVIUM_OCTEON
bool "Cavium Octeon processor"
depends on SYS_HAS_CPU_CAVIUM_OCTEON
+ select ARCH_SPARSEMEM_ENABLE
select CPU_HAS_PREFETCH
select CPU_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_SMP
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index 6210b8d84109..b311be45a720 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -21,6 +21,7 @@ config BCM47XX_BCMA
select BCMA
select BCMA_HOST_SOC
select BCMA_DRIVER_MIPS
+ select BCMA_HOST_PCI if PCI
select BCMA_DRIVER_PCI_HOSTMODE if PCI
default y
help
diff --git a/arch/mips/bcm63xx/dev-pcmcia.c b/arch/mips/bcm63xx/dev-pcmcia.c
index de4d917fd54d..a551bab5ecb9 100644
--- a/arch/mips/bcm63xx/dev-pcmcia.c
+++ b/arch/mips/bcm63xx/dev-pcmcia.c
@@ -79,11 +79,11 @@ static int __init config_pcmcia_cs(unsigned int cs,
return ret;
}
-static const __initdata struct {
+static const struct {
unsigned int cs;
unsigned int base;
unsigned int size;
-} pcmcia_cs[3] = {
+} pcmcia_cs[3] __initconst = {
{
.cs = MPI_CS_PCMCIA_COMMON,
.base = BCM_PCMCIA_COMMON_BASE_PA,
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index f9e275a50d98..2f4f6d5e05b6 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -82,10 +82,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY
help
Lock the kernel's implementation of memcpy() into L2.
-config ARCH_SPARSEMEM_ENABLE
- def_bool y
- select SPARSEMEM_STATIC
-
config IOMMU_HELPER
bool
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 4b93048044eb..ee1fb9f7f517 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -185,7 +185,6 @@ static void __cpuinit octeon_init_secondary(void)
octeon_init_cvmcount();
octeon_irq_setup_secondary();
- raw_local_irq_enable();
}
/**
@@ -233,6 +232,7 @@ static void octeon_smp_finish(void)
/* to generate the first CPU timer interrupt */
write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+ local_irq_enable();
}
/**
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 2e1ad4c652b7..82ad35ce2b45 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -17,7 +17,6 @@
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
-#include <asm/bug.h>
#include <asm/byteorder.h> /* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 285a41fa0b18..eee10dc07ac1 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -8,6 +8,7 @@
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
+#include <linux/bug.h>
#include <linux/irqflags.h>
#include <asm/war.h>
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index f9fa2a479dd0..95e40c1e8ed1 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -94,6 +94,7 @@
#define PRID_IMP_24KE 0x9600
#define PRID_IMP_74K 0x9700
#define PRID_IMP_1004K 0x9900
+#define PRID_IMP_M14KC 0x9c00
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -260,12 +261,12 @@ enum cpu_type_enum {
*/
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
- CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC,
+ CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_M14KC,
/*
* MIPS64 class processors
*/
- CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
+ CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
CPU_XLR, CPU_XLP,
@@ -288,7 +289,7 @@ enum cpu_type_enum {
#define MIPS_CPU_ISA_M64R2 0x00000100
#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \
- MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 )
+ MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2)
#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 86548da650e7..991b659e2548 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -206,7 +206,7 @@
#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100
#define GIC_VPE_EIC_SS(intr) \
- (GIC_EIC_SHADOW_SET_BASE + (4 * intr))
+ (GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
#define GIC_VPE_EIC_VEC_BASE 0x0800
#define GIC_VPE_EIC_VEC(intr) \
@@ -330,6 +330,17 @@ struct gic_intr_map {
#define GIC_FLAG_TRANSPARENT 0x02
};
+/*
+ * This is only used in EIC mode. This helps to figure out which
+ * shared interrupts we need to process when we get a vector interrupt.
+ */
+#define GIC_MAX_SHARED_INTR 0x5
+struct gic_shared_intr_map {
+ unsigned int num_shared_intr;
+ unsigned int intr_list[GIC_MAX_SHARED_INTR];
+ unsigned int local_intr_mask;
+};
+
extern void gic_init(unsigned long gic_base_addr,
unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
unsigned int intrmap_size, unsigned int irqbase);
@@ -338,5 +349,7 @@ extern unsigned int gic_get_int(void);
extern void gic_send_ipi(unsigned int intr);
extern unsigned int plat_ipi_call_int_xlate(unsigned int);
extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
+extern void gic_bind_eic_interrupt(int irq, int set);
+extern unsigned int gic_get_timer_pending(void);
#endif /* _ASM_GICREGS_H */
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index 7ebfc392e58d..ab84064283db 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -251,7 +251,7 @@ struct f_format { /* FPU register format */
unsigned int func : 6;
};
-struct ma_format { /* FPU multipy and add format (MIPS IV) */
+struct ma_format { /* FPU multiply and add format (MIPS IV) */
unsigned int opcode : 6;
unsigned int fr : 5;
unsigned int ft : 5;
@@ -324,7 +324,7 @@ struct f_format { /* FPU register format */
unsigned int opcode : 6;
};
-struct ma_format { /* FPU multipy and add format (MIPS IV) */
+struct ma_format { /* FPU multiply and add format (MIPS IV) */
unsigned int fmt : 2;
unsigned int func : 4;
unsigned int fd : 5;
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index a58f22998a86..29d9c23c20c7 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <asm/addrspace.h>
+#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index fb698dc09bc9..78dbb8a86da2 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -136,6 +136,7 @@ extern void free_irqno(unsigned int irq);
* IE7. Since R2 their number has to be read from the c0_intctl register.
*/
#define CP0_LEGACY_COMPARE_IRQ 7
+#define CP0_LEGACY_PERFCNT_IRQ 7
extern int cp0_compare_irq;
extern int cp0_compare_irq_shift;
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 94d4faad29a1..fdcd78ca1b03 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -99,7 +99,7 @@
#define CKCTL_6368_USBH_CLK_EN (1 << 15)
#define CKCTL_6368_DISABLE_GLESS_EN (1 << 16)
#define CKCTL_6368_NAND_CLK_EN (1 << 17)
-#define CKCTL_6368_IPSEC_CLK_EN (1 << 17)
+#define CKCTL_6368_IPSEC_CLK_EN (1 << 18)
#define CKCTL_6368_ALL_SAFE_EN (CKCTL_6368_SWPKT_USB_EN | \
CKCTL_6368_SWPKT_SAR_EN | \
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h
index d11aa02a956a..5447d9fc4219 100644
--- a/arch/mips/include/asm/mips-boards/maltaint.h
+++ b/arch/mips/include/asm/mips-boards/maltaint.h
@@ -86,6 +86,16 @@
#define GIC_CPU_INT4 4 /* . */
#define GIC_CPU_INT5 5 /* Core Interrupt 5 */
+/* MALTA GIC local interrupts */
+#define GIC_INT_TMR (GIC_CPU_INT5)
+#define GIC_INT_PERFCTR (GIC_CPU_INT5)
+
+/* GIC constants */
+/* Add 2 to convert non-eic hw int # to eic vector # */
+#define GIC_CPU_TO_VEC_OFFSET (2)
+/* If we map an intr to pin X, GIC will actually generate vector X+1 */
+#define GIC_PIN_TO_VEC_OFFSET (1)
+
#define GIC_EXT_INTR(x) x
/* External Interrupts used for IPI */
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index c9420aa97e32..e71ff4c317f2 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -48,7 +48,7 @@
#define CP0_VPECONF0 $1, 2
#define CP0_VPECONF1 $1, 3
#define CP0_YQMASK $1, 4
-#define CP0_VPESCHEDULE $1, 5
+#define CP0_VPESCHEDULE $1, 5
#define CP0_VPESCHEFBK $1, 6
#define CP0_TCSTATUS $2, 1
#define CP0_TCBIND $2, 2
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 5d33621b5658..4f8ddba8c360 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -22,7 +22,7 @@ struct task_struct;
* switch_to(n) should switch tasks to task nr n, first
* checking that n isn't the current task, in which case it does nothing.
*/
-extern asmlinkage void *resume(void *last, void *next, void *next_ti);
+extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu);
extern unsigned int ll_bit;
extern struct task_struct *ll_task;
@@ -66,11 +66,13 @@ do { \
#define switch_to(prev, next, last) \
do { \
+ u32 __usedfpu; \
__mips_mt_fpaff_switch_to(prev); \
if (cpu_has_dsp) \
__save_dsp(prev); \
__clear_software_ll_bit(); \
- (last) = resume(prev, next, task_thread_info(next)); \
+ __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \
+ (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
} while (0)
#define finish_arch_switch(prev) \
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index e2eca7d10598..ca97e0ecb64b 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -60,6 +60,8 @@ struct thread_info {
register struct thread_info *__current_thread_info __asm__("$28");
#define current_thread_info() __current_thread_info
+#endif /* !__ASSEMBLY__ */
+
/* thread information allocation */
#if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
#define THREAD_SIZE_ORDER (1)
@@ -85,8 +87,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define STACK_WARN (THREAD_SIZE / 8)
-#endif /* !__ASSEMBLY__ */
-
#define PREEMPT_ACTIVE 0x10000000
/*
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 6ae7ce4ac63e..f4630e1082ab 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -4,7 +4,7 @@
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
- * Copyright (C) 2001, 2004 MIPS Inc.
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -199,6 +199,7 @@ void __init check_wait(void)
cpu_wait = rm7k_wait_irqoff;
break;
+ case CPU_M14KC:
case CPU_24K:
case CPU_34K:
case CPU_1004K:
@@ -810,6 +811,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_5KC;
__cpu_name[cpu] = "MIPS 5Kc";
break;
+ case PRID_IMP_5KE:
+ c->cputype = CPU_5KE;
+ __cpu_name[cpu] = "MIPS 5KE";
+ break;
case PRID_IMP_20KC:
c->cputype = CPU_20KC;
__cpu_name[cpu] = "MIPS 20Kc";
@@ -831,6 +836,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_74K;
__cpu_name[cpu] = "MIPS 74Kc";
break;
+ case PRID_IMP_M14KC:
+ c->cputype = CPU_M14KC;
+ __cpu_name[cpu] = "MIPS M14Kc";
+ break;
case PRID_IMP_1004K:
c->cputype = CPU_1004K;
__cpu_name[cpu] = "MIPS 1004Kc";
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 57ba13edb03a..3fc1691110dc 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
+ * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle
* Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
*/
#include <linux/interrupt.h>
@@ -35,6 +35,12 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(kernel_thread);
/*
+ * Functions that operate on entire pages. Mostly used by memory management.
+ */
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
+
+/*
* Userspace access stuff.
*/
EXPORT_SYMBOL(__copy_user);
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index ce89c8061708..0441f54b2a6a 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -31,7 +31,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti)
+ * struct thread_info *next_ti, int usedfpu)
*/
.align 7
LEAF(resume)
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index f29099b104c4..eb5e394a4650 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -162,11 +162,6 @@ static unsigned int counters_total_to_per_cpu(unsigned int counters)
return counters >> vpe_shift();
}
-static unsigned int counters_per_cpu_to_total(unsigned int counters)
-{
- return counters << vpe_shift();
-}
-
#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id() 0
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 293898391e67..9c51be5a163a 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -43,7 +43,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti) )
+ * struct thread_info *next_ti, int usedfpu)
*/
LEAF(resume)
mfc0 t1, CP0_STATUS
@@ -51,18 +51,9 @@ LEAF(resume)
cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0)
- /*
- * check if we need to save FPU registers
- */
- lw t3, TASK_THREAD_INFO(a0)
- lw t0, TI_FLAGS(t3)
- li t1, _TIF_USEDFPU
- and t2, t0, t1
- beqz t2, 1f
- nor t1, zero, t1
+ beqz a3, 1f
- and t0, t0, t1
- sw t0, TI_FLAGS(t3)
+ PTR_L t3, TASK_THREAD_INFO(a0)
/*
* clear saved user stack CU1 bit
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 9414f9354469..42d2a3938420 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -41,7 +41,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti)
+ * struct thread_info *next_ti, int usedfpu)
*/
.align 5
LEAF(resume)
@@ -53,16 +53,10 @@
/*
* check if we need to save FPU registers
*/
- PTR_L t3, TASK_THREAD_INFO(a0)
- LONG_L t0, TI_FLAGS(t3)
- li t1, _TIF_USEDFPU
- and t2, t0, t1
- beqz t2, 1f
- nor t1, zero, t1
- and t0, t0, t1
- LONG_S t0, TI_FLAGS(t3)
+ beqz a3, 1f
+ PTR_L t3, TASK_THREAD_INFO(a0)
/*
* clear saved user stack CU1 bit
*/
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 3046e2986006..8e393b8443f7 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -15,7 +15,6 @@
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
-#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
@@ -197,13 +196,6 @@ static void bmips_init_secondary(void)
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
#endif
-
- /* make sure there won't be a timer interrupt for a little while */
- write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
-
- irq_enable_hazard();
- set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
- irq_enable_hazard();
}
/*
@@ -212,6 +204,13 @@ static void bmips_init_secondary(void)
static void bmips_smp_finish(void)
{
pr_info("SMP: CPU%d is running\n", smp_processor_id());
+
+ /* make sure there won't be a timer interrupt for a little while */
+ write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+
+ irq_enable_hazard();
+ set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
+ irq_enable_hazard();
}
/*
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 48650c818040..1268392f1d27 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -122,13 +122,21 @@ asmlinkage __cpuinit void start_secondary(void)
notify_cpu_starting(cpu);
- mp_ops->smp_finish();
+ set_cpu_online(cpu, true);
+
set_cpu_sibling_map(cpu);
cpu_set(cpu, cpu_callin_map);
synchronise_count_slave();
+ /*
+ * irq will be enabled in ->smp_finish(), enabling it too early
+ * is dangerous.
+ */
+ WARN_ON_ONCE(!irqs_disabled());
+ mp_ops->smp_finish();
+
cpu_idle();
}
@@ -196,8 +204,6 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
while (!cpu_isset(cpu, cpu_callin_map))
udelay(100);
- set_cpu_online(cpu, true);
-
return 0;
}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f5dd38f1d015..15b5f3cfd20c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -322,7 +322,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
/*
* Common setup before any secondaries are started
- * Make sure all CPU's are in a sensible state before we boot any of the
+ * Make sure all CPUs are in a sensible state before we boot any of the
* secondaries.
*
* For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
@@ -340,12 +340,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
/*
* TCContext gets an offset from the base of the IPIQ array
* to be used in low-level code to detect the presence of
- * an active IPI queue
+ * an active IPI queue.
*/
write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
/* Bind tc to vpe */
write_tc_c0_tcbind(vpe);
- /* In general, all TCs should have the same cpu_data indications */
+ /* In general, all TCs should have the same cpu_data indications. */
memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
if (cpu_data[0].cputype == CPU_34K ||
@@ -358,8 +358,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
}
/*
- * Tweak to get Count registes in as close a sync as possible.
- * Value seems good for 34K-class cores.
+ * Tweak to get Count registes in as close a sync as possible. The
+ * value seems good for 34K-class cores.
*/
#define CP0_SKEW 8
@@ -615,7 +615,6 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
void smtc_init_secondary(void)
{
- local_irq_enable();
}
void smtc_smp_finish(void)
@@ -631,6 +630,8 @@ void smtc_smp_finish(void)
if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
+ local_irq_enable();
+
printk("TC %d going on-line as CPU %d\n",
cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 99f913c8d7a6..842d55e411fd 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -111,7 +111,6 @@ void __cpuinit synchronise_count_master(void)
void __cpuinit synchronise_count_slave(void)
{
int i;
- unsigned long flags;
unsigned int initcount;
int ncpus;
@@ -123,8 +122,6 @@ void __cpuinit synchronise_count_slave(void)
return;
#endif
- local_irq_save(flags);
-
/*
* Not every cpu is online at the time this gets called,
* so we first wait for the master to say everyone is ready
@@ -154,7 +151,5 @@ void __cpuinit synchronise_count_slave(void)
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
-
- local_irq_restore(flags);
}
#undef NR_LOOPS
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 2d0c2a277f52..c3c293543703 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -132,6 +132,9 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
+ if (!task)
+ task = current;
+
if (raw_show_trace || !__kernel_text_address(pc)) {
show_raw_backtrace(sp);
return;
@@ -1249,6 +1252,7 @@ static inline void parity_protection_init(void)
break;
case CPU_5KC:
+ case CPU_5KE:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
@@ -1498,6 +1502,7 @@ extern void flush_tlb_handlers(void);
* Timer interrupt
*/
int cp0_compare_irq;
+EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;
/*
@@ -1597,7 +1602,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
cp0_perfcount_irq = -1;
} else {
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
- cp0_compare_irq_shift = cp0_compare_irq;
+ cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
cp0_perfcount_irq = -1;
}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 924da5eb7031..df243a64f430 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
#include <asm/asm-offsets.h>
#include <asm/page.h>
+#include <asm/thread_info.h>
#include <asm-generic/vmlinux.lds.h>
#undef mips
@@ -72,7 +73,7 @@ SECTIONS
.data : { /* Data */
. = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
- INIT_TASK_DATA(PAGE_SIZE)
+ INIT_TASK_DATA(THREAD_SIZE)
NOSAVE_DATA
CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 4aa20280613e..fd6203f14f1f 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -3,8 +3,8 @@
#
obj-y += cache.o dma-default.o extable.o fault.o \
- gup.o init.o mmap.o page.o tlbex.o \
- tlbex-fault.o uasm.o
+ gup.o init.o mmap.o page.o page-funcs.o \
+ tlbex.o tlbex-fault.o uasm.o
obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 5109be96d98d..f092c265dc63 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -977,7 +977,7 @@ static void __cpuinit probe_pcache(void)
c->icache.linesz = 2 << lsize;
else
c->icache.linesz = lsize;
- c->icache.sets = 64 << ((config1 >> 22) & 7);
+ c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
c->icache.ways = 1 + ((config1 >> 16) & 7);
icache_size = c->icache.sets *
@@ -997,7 +997,7 @@ static void __cpuinit probe_pcache(void)
c->dcache.linesz = 2 << lsize;
else
c->dcache.linesz= lsize;
- c->dcache.sets = 64 << ((config1 >> 13) & 7);
+ c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
c->dcache.ways = 1 + ((config1 >> 7) & 7);
dcache_size = c->dcache.sets *
@@ -1051,6 +1051,7 @@ static void __cpuinit probe_pcache(void)
case CPU_R14000:
break;
+ case CPU_M14KC:
case CPU_24K:
case CPU_34K:
case CPU_74K:
diff --git a/arch/mips/mm/page-funcs.S b/arch/mips/mm/page-funcs.S
new file mode 100644
index 000000000000..48a6b38ff13e
--- /dev/null
+++ b/arch/mips/mm/page-funcs.S
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Micro-assembler generated clear_page/copy_page functions.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+#define cpu_clear_page_function_name clear_page_cpu
+#define cpu_copy_page_function_name copy_page_cpu
+#else
+#define cpu_clear_page_function_name clear_page
+#define cpu_copy_page_function_name copy_page
+#endif
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x058 bytes
+ * R4600 v1.7: 0x05c bytes
+ * R4600 v2.0: 0x060 bytes
+ * With prefetching, 16 word strides 0x120 bytes
+ */
+EXPORT(__clear_page_start)
+LEAF(cpu_clear_page_function_name)
+1: j 1b /* Dummy, will be replaced. */
+ .space 288
+END(cpu_clear_page_function_name)
+EXPORT(__clear_page_end)
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x11c bytes
+ * R4600 v1.7: 0x080 bytes
+ * R4600 v2.0: 0x07c bytes
+ * With prefetching, 16 word strides 0x540 bytes
+ */
+EXPORT(__copy_page_start)
+LEAF(cpu_copy_page_function_name)
+1: j 1b /* Dummy, will be replaced. */
+ .space 1344
+END(cpu_copy_page_function_name)
+EXPORT(__copy_page_end)
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index cc0b626858b3..98f530e18216 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -6,6 +6,7 @@
* Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2007 Maciej W. Rozycki
* Copyright (C) 2008 Thiemo Seufer
+ * Copyright (C) 2012 MIPS Technologies, Inc.
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -71,45 +72,6 @@ static struct uasm_reloc __cpuinitdata relocs[5];
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache: 0x058 bytes
- * R4600 v1.7: 0x05c bytes
- * R4600 v2.0: 0x060 bytes
- * With prefetching, 16 word strides 0x120 bytes
- */
-
-static u32 clear_page_array[0x120 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void clear_page_cpu(void *page) __attribute__((alias("clear_page_array")));
-#else
-void clear_page(void *page) __attribute__((alias("clear_page_array")));
-#endif
-
-EXPORT_SYMBOL(clear_page);
-
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache: 0x11c bytes
- * R4600 v1.7: 0x080 bytes
- * R4600 v2.0: 0x07c bytes
- * With prefetching, 16 word strides 0x540 bytes
- */
-static u32 copy_page_array[0x540 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void
-copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array")));
-#else
-void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
-#endif
-
-EXPORT_SYMBOL(copy_page);
-
-
static int pref_bias_clear_store __cpuinitdata;
static int pref_bias_copy_load __cpuinitdata;
static int pref_bias_copy_store __cpuinitdata;
@@ -282,10 +244,15 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
}
}
+extern u32 __clear_page_start;
+extern u32 __clear_page_end;
+extern u32 __copy_page_start;
+extern u32 __copy_page_end;
+
void __cpuinit build_clear_page(void)
{
int off;
- u32 *buf = (u32 *)&clear_page_array;
+ u32 *buf = &__clear_page_start;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
@@ -356,17 +323,17 @@ void __cpuinit build_clear_page(void)
uasm_i_jr(&buf, RA);
uasm_i_nop(&buf);
- BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));
+ BUG_ON(buf > &__clear_page_end);
uasm_resolve_relocs(relocs, labels);
pr_debug("Synthesized clear page handler (%u instructions).\n",
- (u32)(buf - clear_page_array));
+ (u32)(buf - &__clear_page_start));
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
- for (i = 0; i < (buf - clear_page_array); i++)
- pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
+ for (i = 0; i < (buf - &__clear_page_start); i++)
+ pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
pr_debug("\t.set pop\n");
}
@@ -427,7 +394,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
void __cpuinit build_copy_page(void)
{
int off;
- u32 *buf = (u32 *)&copy_page_array;
+ u32 *buf = &__copy_page_start;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
@@ -595,21 +562,23 @@ void __cpuinit build_copy_page(void)
uasm_i_jr(&buf, RA);
uasm_i_nop(&buf);
- BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));
+ BUG_ON(buf > &__copy_page_end);
uasm_resolve_relocs(relocs, labels);
pr_debug("Synthesized copy page handler (%u instructions).\n",
- (u32)(buf - copy_page_array));
+ (u32)(buf - &__copy_page_start));
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
- for (i = 0; i < (buf - copy_page_array); i++)
- pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
+ for (i = 0; i < (buf - &__copy_page_start); i++)
+ pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
pr_debug("\t.set pop\n");
}
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+extern void clear_page_cpu(void *page);
+extern void copy_page_cpu(void *to, void *from);
/*
* Pad descriptors to cacheline, since each is exclusively owned by a
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0bc485b3cd60..03eb0ef91580 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -9,6 +9,7 @@
* Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2008, 2009 Cavium Networks, Inc.
+ * Copyright (C) 2011 MIPS Technologies, Inc.
*
* ... and the days got worse and worse and now you see
* I've gone completly out of my mind.
@@ -494,6 +495,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_R14000:
case CPU_4KC:
case CPU_4KEC:
+ case CPU_M14KC:
case CPU_SB1:
case CPU_SB1A:
case CPU_4KSC:
diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c
index bf80921f2f56..284dea54faf5 100644
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -241,8 +241,9 @@ void __init mips_pcibios_init(void)
return;
}
- if (controller->io_resource->start < 0x00001000UL) /* FIXME */
- controller->io_resource->start = 0x00001000UL;
+ /* Change start address to avoid conflicts with ACPI and SMB devices */
+ if (controller->io_resource->start < 0x00002000UL)
+ controller->io_resource->start = 0x00002000UL;
iomem_resource.end &= 0xfffffffffULL; /* 64 GB */
ioport_resource.end = controller->io_resource->end;
@@ -253,7 +254,7 @@ void __init mips_pcibios_init(void)
}
/* Enable PCI 2.1 compatibility in PIIX4 */
-static void __init quirk_dlcsetup(struct pci_dev *dev)
+static void __devinit quirk_dlcsetup(struct pci_dev *dev)
{
u8 odlc, ndlc;
(void) pci_read_config_byte(dev, 0x82, &odlc);
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index b7f37d4982fa..2e28f653f66d 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -111,7 +111,7 @@ static void __init pci_clock_check(void)
unsigned int __iomem *jmpr_p =
(unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int));
int jmpr = (__raw_readl(jmpr_p) >> 2) & 0x07;
- static const int pciclocks[] __initdata = {
+ static const int pciclocks[] __initconst = {
33, 20, 25, 30, 12, 16, 37, 10
};
int pciclock = pciclocks[jmpr];
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index acb677a1227c..b3df7c2aad1e 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -82,8 +82,10 @@ void __init prom_free_prom_memory(void)
void xlp_mmu_init(void)
{
+ /* enable extended TLB and Large Fixed TLB */
write_c0_config6(read_c0_config6() | 0x24);
- current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
+ /* set page mask of Fixed TLB in config7 */
write_c0_config7(PM_DEFAULT_MASK >>
(13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
}
@@ -100,6 +102,10 @@ void __init prom_init(void)
nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
#ifdef CONFIG_SMP
nlm_wakeup_secondary_cpus(0xffffffff);
+
+ /* update TLB size after waking up threads */
+ current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
register_smp_ops(&nlm_smp_ops);
#endif
}
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index d1f2d4c52d42..b6e378211a2c 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -78,6 +78,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
switch (current_cpu_type()) {
case CPU_5KC:
+ case CPU_M14KC:
case CPU_20KC:
case CPU_24K:
case CPU_25KF:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index baba3bcaa3c2..4d80a856048d 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -322,6 +322,10 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.num_counters = counters;
switch (current_cpu_type()) {
+ case CPU_M14KC:
+ op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
+ break;
+
case CPU_20KC:
op_model_mipsxx_ops.cpu_type = "mips/20K";
break;
diff --git a/arch/mips/pci/fixup-fuloong2e.c b/arch/mips/pci/fixup-fuloong2e.c
index d5d4c018fb04..0857ab8c3919 100644
--- a/arch/mips/pci/fixup-fuloong2e.c
+++ b/arch/mips/pci/fixup-fuloong2e.c
@@ -48,7 +48,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
return 0;
}
-static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
@@ -60,7 +60,7 @@ static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
pci_write_config_dword(pdev, 0xe4, 1 << 5);
}
-static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func0_fixup(struct pci_dev *pdev)
{
unsigned char c;
@@ -135,7 +135,7 @@ static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
printk(KERN_INFO"via686b fix: ISA bridge done\n");
}
-static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func1_fixup(struct pci_dev *pdev)
{
printk(KERN_INFO"via686b fix: IDE\n");
@@ -168,19 +168,19 @@ static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
printk(KERN_INFO"via686b fix: IDE done\n");
}
-static void __init loongson2e_686b_func2_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func2_fixup(struct pci_dev *pdev)
{
/* irq routing */
pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 10);
}
-static void __init loongson2e_686b_func3_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func3_fixup(struct pci_dev *pdev)
{
/* irq routing */
pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 11);
}
-static void __init loongson2e_686b_func5_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func5_fixup(struct pci_dev *pdev)
{
unsigned int val;
unsigned char c;
diff --git a/arch/mips/pci/fixup-lemote2f.c b/arch/mips/pci/fixup-lemote2f.c
index 4b9768d5d729..a7b917dcf604 100644
--- a/arch/mips/pci/fixup-lemote2f.c
+++ b/arch/mips/pci/fixup-lemote2f.c
@@ -96,21 +96,21 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
}
/* CS5536 SPEC. fixup */
-static void __init loongson_cs5536_isa_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_isa_fixup(struct pci_dev *pdev)
{
/* the uart1 and uart2 interrupt in PIC is enabled as default */
pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1);
pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1);
}
-static void __init loongson_cs5536_ide_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ide_fixup(struct pci_dev *pdev)
{
/* setting the mutex pin as IDE function */
pci_write_config_dword(pdev, PCI_IDE_CFG_REG,
CS5536_IDE_FLASH_SIGNATURE);
}
-static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_acc_fixup(struct pci_dev *pdev)
{
/* enable the AUDIO interrupt in PIC */
pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1);
@@ -118,14 +118,14 @@ static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0);
}
-static void __init loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
{
/* enable the OHCI interrupt in PIC */
/* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */
pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1);
}
-static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
{
u32 hi, lo;
@@ -137,7 +137,7 @@ static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000);
}
-static void __init loongson_nec_fixup(struct pci_dev *pdev)
+static void __devinit loongson_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c
index 0f48498bc231..70073c98ed32 100644
--- a/arch/mips/pci/fixup-malta.c
+++ b/arch/mips/pci/fixup-malta.c
@@ -49,10 +49,10 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
return 0;
}
-static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
+static void __devinit malta_piix_func0_fixup(struct pci_dev *pdev)
{
unsigned char reg_val;
- static int piixirqmap[16] __initdata = { /* PIIX PIRQC[A:D] irq mappings */
+ static int piixirqmap[16] __devinitdata = { /* PIIX PIRQC[A:D] irq mappings */
0, 0, 0, 3,
4, 5, 6, 7,
0, 9, 10, 11,
@@ -83,7 +83,7 @@ static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
malta_piix_func0_fixup);
-static void __init malta_piix_func1_fixup(struct pci_dev *pdev)
+static void __devinit malta_piix_func1_fixup(struct pci_dev *pdev)
{
unsigned char reg_val;
diff --git a/arch/mips/pci/fixup-mpc30x.c b/arch/mips/pci/fixup-mpc30x.c
index e08f49cb6875..8e4f8288eca2 100644
--- a/arch/mips/pci/fixup-mpc30x.c
+++ b/arch/mips/pci/fixup-mpc30x.c
@@ -22,13 +22,13 @@
#include <asm/vr41xx/mpc30x.h>
-static const int internal_func_irqs[] __initdata = {
+static const int internal_func_irqs[] __initconst = {
VRC4173_CASCADE_IRQ,
VRC4173_AC97_IRQ,
VRC4173_USB_IRQ,
};
-static const int irq_tab_mpc30x[] __initdata = {
+static const int irq_tab_mpc30x[] __initconst = {
[12] = VRC4173_PCMCIA1_IRQ,
[13] = VRC4173_PCMCIA2_IRQ,
[29] = MQ200_IRQ,
diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c
index f0bb9146e6c0..d02900a72916 100644
--- a/arch/mips/pci/fixup-sb1250.c
+++ b/arch/mips/pci/fixup-sb1250.c
@@ -15,7 +15,7 @@
* Set the BCM1250, etc. PCI host bridge's TRDY timeout
* to the finite max.
*/
-static void __init quirk_sb1250_pci(struct pci_dev *dev)
+static void __devinit quirk_sb1250_pci(struct pci_dev *dev)
{
pci_write_config_byte(dev, 0x40, 0xff);
}
@@ -25,7 +25,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
/*
* The BCM1250, etc. PCI/HT bridge reports as a host bridge.
*/
-static void __init quirk_sb1250_ht(struct pci_dev *dev)
+static void __devinit quirk_sb1250_ht(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
@@ -35,7 +35,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT,
/*
* Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max.
*/
-static void __init quirk_sp1011(struct pci_dev *dev)
+static void __devinit quirk_sp1011(struct pci_dev *dev)
{
pci_write_config_byte(dev, 0x64, 0xff);
}
diff --git a/arch/mips/pci/ops-tx4927.c b/arch/mips/pci/ops-tx4927.c
index a1e7e6d80c8c..bc13e29d2bb3 100644
--- a/arch/mips/pci/ops-tx4927.c
+++ b/arch/mips/pci/ops-tx4927.c
@@ -495,7 +495,7 @@ irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id)
}
#ifdef CONFIG_TOSHIBA_FPCIB0
-static void __init tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __devinit tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
{
struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(dev->bus);
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 0fbe4c0c170a..fdc24440294c 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -212,7 +212,7 @@ static inline void pci_enable_swapping(struct pci_dev *dev)
bridge->b_widget.w_tflush; /* Flush */
}
-static void __init pci_fixup_ioc3(struct pci_dev *d)
+static void __devinit pci_fixup_ioc3(struct pci_dev *d)
{
pci_disable_swapping(d);
}
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index ea453532a33c..075d87acd12a 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -129,7 +129,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
/* setup reset gpio used by pci */
reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
- if (reset_gpio > 0)
+ if (gpio_is_valid(reset_gpio))
devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
/* enable auto-switching between PCI and EBU */
@@ -192,7 +192,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
/* toggle reset pin */
- if (reset_gpio > 0) {
+ if (gpio_is_valid(reset_gpio)) {
__gpio_set_value(reset_gpio, 0);
wmb();
mdelay(1);
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c
index 1644805a6730..172af1cd5867 100644
--- a/arch/mips/pci/pci-xlr.c
+++ b/arch/mips/pci/pci-xlr.c
@@ -41,6 +41,7 @@
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/console.h>
+#include <linux/pci_regs.h>
#include <asm/io.h>
@@ -156,35 +157,55 @@ struct pci_controller nlm_pci_controller = {
.io_offset = 0x00000000UL,
};
+/*
+ * The top level PCIe links on the XLS PCIe controller appear as
+ * bridges. Given a device, this function finds which link it is
+ * on.
+ */
+static struct pci_dev *xls_get_pcie_link(const struct pci_dev *dev)
+{
+ struct pci_bus *bus, *p;
+
+ /* Find the bridge on bus 0 */
+ bus = dev->bus;
+ for (p = bus->parent; p && p->number != 0; p = p->parent)
+ bus = p;
+
+ return p ? bus->self : NULL;
+}
+
static int get_irq_vector(const struct pci_dev *dev)
{
+ struct pci_dev *lnk;
+
if (!nlm_chip_is_xls())
- return PIC_PCIX_IRQ; /* for XLR just one IRQ*/
+ return PIC_PCIX_IRQ; /* for XLR just one IRQ */
/*
* For XLS PCIe, there is an IRQ per Link, find out which
* link the device is on to assign interrupts
- */
- if (dev->bus->self == NULL)
+ */
+ lnk = xls_get_pcie_link(dev);
+ if (lnk == NULL)
return 0;
- switch (dev->bus->self->devfn) {
- case 0x0:
+ switch (PCI_SLOT(lnk->devfn)) {
+ case 0:
return PIC_PCIE_LINK0_IRQ;
- case 0x8:
+ case 1:
return PIC_PCIE_LINK1_IRQ;
- case 0x10:
+ case 2:
if (nlm_chip_is_xls_b())
return PIC_PCIE_XLSB0_LINK2_IRQ;
else
return PIC_PCIE_LINK2_IRQ;
- case 0x18:
+ case 3:
if (nlm_chip_is_xls_b())
return PIC_PCIE_XLSB0_LINK3_IRQ;
else
return PIC_PCIE_LINK3_IRQ;
}
- WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
+ WARN(1, "Unexpected devfn %d\n", lnk->devfn);
return 0;
}
@@ -202,7 +223,27 @@ void arch_teardown_msi_irq(unsigned int irq)
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
struct msi_msg msg;
+ struct pci_dev *lnk;
int irq, ret;
+ u16 val;
+
+ /* MSI not supported on XLR */
+ if (!nlm_chip_is_xls())
+ return 1;
+
+ /*
+ * Enable MSI on the XLS PCIe controller bridge which was disabled
+ * at enumeration, the bridge MSI capability is at 0x50
+ */
+ lnk = xls_get_pcie_link(dev);
+ if (lnk == NULL)
+ return 1;
+
+ pci_read_config_word(lnk, 0x50 + PCI_MSI_FLAGS, &val);
+ if ((val & PCI_MSI_FLAGS_ENABLE) == 0) {
+ val |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(lnk, 0x50 + PCI_MSI_FLAGS, val);
+ }
irq = get_irq_vector(dev);
if (irq <= 0)
@@ -327,7 +368,7 @@ static int __init pcibios_init(void)
}
} else {
/* XLR PCI controller ACK */
- irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack);
+ irq_set_handler_data(PIC_PCIX_IRQ, xlr_pci_ack);
}
return 0;
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index b71fae231049..5edab2bc6fc0 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -115,11 +115,11 @@ static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
*/
static void __cpuinit yos_init_secondary(void)
{
- set_c0_status(ST0_CO | ST0_IE | ST0_IM);
}
static void __cpuinit yos_smp_finish(void)
{
+ set_c0_status(ST0_CO | ST0_IM | ST0_IE);
}
/* Hook for after all CPUs are online */
diff --git a/arch/mips/powertv/asic/asic-calliope.c b/arch/mips/powertv/asic/asic-calliope.c
index 0a170e0ffeaa..7773f3d956b0 100644
--- a/arch/mips/powertv/asic/asic-calliope.c
+++ b/arch/mips/powertv/asic/asic-calliope.c
@@ -28,7 +28,7 @@
#define CALLIOPE_ADDR(x) (CALLIOPE_IO_BASE + (x))
-const struct register_map calliope_register_map __initdata = {
+const struct register_map calliope_register_map __initconst = {
.eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)},
.eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)},
.eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)},
diff --git a/arch/mips/powertv/asic/asic-cronus.c b/arch/mips/powertv/asic/asic-cronus.c
index bbc0c122be5e..da076db7b7ed 100644
--- a/arch/mips/powertv/asic/asic-cronus.c
+++ b/arch/mips/powertv/asic/asic-cronus.c
@@ -28,7 +28,7 @@
#define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x))
-const struct register_map cronus_register_map __initdata = {
+const struct register_map cronus_register_map __initconst = {
.eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)},
.eic_cfg_bits = {.phys = CRONUS_ADDR(0x000038)},
.eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)},
diff --git a/arch/mips/powertv/asic/asic-gaia.c b/arch/mips/powertv/asic/asic-gaia.c
index 91dda682752c..47683b370e74 100644
--- a/arch/mips/powertv/asic/asic-gaia.c
+++ b/arch/mips/powertv/asic/asic-gaia.c
@@ -23,7 +23,7 @@
#include <linux/init.h>
#include <asm/mach-powertv/asic.h>
-const struct register_map gaia_register_map __initdata = {
+const struct register_map gaia_register_map __initconst = {
.eic_slow0_strt_add = {.phys = GAIA_IO_BASE + 0x000000},
.eic_cfg_bits = {.phys = GAIA_IO_BASE + 0x000038},
.eic_ready_status = {.phys = GAIA_IO_BASE + 0x00004C},
diff --git a/arch/mips/powertv/asic/asic-zeus.c b/arch/mips/powertv/asic/asic-zeus.c
index 4a05bb096476..6ff4b10f09da 100644
--- a/arch/mips/powertv/asic/asic-zeus.c
+++ b/arch/mips/powertv/asic/asic-zeus.c
@@ -28,7 +28,7 @@
#define ZEUS_ADDR(x) (ZEUS_IO_BASE + (x))
-const struct register_map zeus_register_map __initdata = {
+const struct register_map zeus_register_map __initconst = {
.eic_slow0_strt_add = {.phys = ZEUS_ADDR(0x000000)},
.eic_cfg_bits = {.phys = ZEUS_ADDR(0x000038)},
.eic_ready_status = {.phys = ZEUS_ADDR(0x00004c)},
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index 682efb0c108d..64eb71b15280 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -269,7 +269,7 @@ txx9_i8259_irq_setup(int irq)
return err;
}
-static void __init quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __devinit quirk_slc90e66_bridge(struct pci_dev *dev)
{
int irq; /* PCI/ISA Bridge interrupt */
u8 reg_64;
diff --git a/arch/mn10300/include/asm/ptrace.h b/arch/mn10300/include/asm/ptrace.h
index 55b79ef10028..44251b974f1d 100644
--- a/arch/mn10300/include/asm/ptrace.h
+++ b/arch/mn10300/include/asm/ptrace.h
@@ -81,9 +81,6 @@ struct pt_regs {
#define PTRACE_GETFPREGS 14
#define PTRACE_SETFPREGS 15
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD 0x00000001
-
#ifdef __KERNEL__
#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 08251d6f6b11..ac519bbd42ff 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -123,7 +123,7 @@ static inline unsigned long current_stack_pointer(void)
}
#ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_info(struct thread_info *ti);
#endif
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
diff --git a/arch/mn10300/include/asm/timex.h b/arch/mn10300/include/asm/timex.h
index bd4e90dfe6c2..f8e66425cbf8 100644
--- a/arch/mn10300/include/asm/timex.h
+++ b/arch/mn10300/include/asm/timex.h
@@ -11,7 +11,6 @@
#ifndef _ASM_TIMEX_H
#define _ASM_TIMEX_H
-#include <asm/hardirq.h>
#include <unit/timex.h>
#define TICK_SIZE (tick_nsec / 1000)
@@ -30,16 +29,6 @@ static inline cycles_t get_cycles(void)
extern int init_clockevents(void);
extern int init_clocksource(void);
-static inline void setup_jiffies_interrupt(int irq,
- struct irqaction *action)
-{
- u16 tmp;
- setup_irq(irq, action);
- set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
- GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
- tmp = GxICR(irq);
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_TIMEX_H */
diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c
index 69cae0260786..ccce35e3e179 100644
--- a/arch/mn10300/kernel/cevt-mn10300.c
+++ b/arch/mn10300/kernel/cevt-mn10300.c
@@ -70,6 +70,16 @@ static void event_handler(struct clock_event_device *dev)
{
}
+static inline void setup_jiffies_interrupt(int irq,
+ struct irqaction *action)
+{
+ u16 tmp;
+ setup_irq(irq, action);
+ set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+ GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+ tmp = GxICR(irq);
+}
+
int __init init_clockevents(void)
{
struct clock_event_device *cd;
diff --git a/arch/mn10300/kernel/internal.h b/arch/mn10300/kernel/internal.h
index a5ac755dd69f..2df440105a80 100644
--- a/arch/mn10300/kernel/internal.h
+++ b/arch/mn10300/kernel/internal.h
@@ -9,6 +9,8 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#include <linux/irqreturn.h>
+
struct clocksource;
struct clock_event_device;
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 2381df83bd00..35932a8de8b8 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -170,9 +170,9 @@ mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
case SC1TXIRQ:
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
case TM12IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER9
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
case TM9IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER3
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
case TM3IRQ:
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
#endif /* CONFIG_MN10300_TTYSM1 */
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index 6ab0bee2a54f..4d584ae29ae1 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -459,10 +459,11 @@ static int handle_signal(int sig,
else
ret = setup_frame(sig, ka, oldset, regs);
if (ret)
- return;
+ return ret;
signal_delivered(sig, info, ka, regs,
- test_thread_flag(TIF_SINGLESTEP));
+ test_thread_flag(TIF_SINGLESTEP));
+ return 0;
}
/*
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index 94a9c6d53e1b..b900e5afa0ae 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -26,6 +26,7 @@
#include <linux/kdebug.h>
#include <linux/bug.h>
#include <linux/irq.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/io.h>
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index 159acb02cfd4..e244ebe637e1 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -15,6 +15,7 @@
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
+#include <linux/export.h>
#include <asm/io.h>
static unsigned long pci_sram_allocated = 0xbc000000;
diff --git a/arch/mn10300/unit-asb2303/include/unit/timex.h b/arch/mn10300/unit-asb2303/include/unit/timex.h
index cc18fe7d8b90..c37f9832cf17 100644
--- a/arch/mn10300/unit-asb2303/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2303/include/unit/timex.h
@@ -11,10 +11,6 @@
#ifndef _ASM_UNIT_TIMEX_H
#define _ASM_UNIT_TIMEX_H
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
#include <asm/timer-regs.h>
#include <unit/clock.h>
#include <asm/param.h>
diff --git a/arch/mn10300/unit-asb2303/smc91111.c b/arch/mn10300/unit-asb2303/smc91111.c
index 43c246439413..53677694b165 100644
--- a/arch/mn10300/unit-asb2303/smc91111.c
+++ b/arch/mn10300/unit-asb2303/smc91111.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <asm/io.h>
+#include <asm/irq.h>
#include <asm/timex.h>
#include <asm/processor.h>
#include <asm/intctl-regs.h>
diff --git a/arch/mn10300/unit-asb2305/include/unit/timex.h b/arch/mn10300/unit-asb2305/include/unit/timex.h
index 758af30d1a16..4cefc224f448 100644
--- a/arch/mn10300/unit-asb2305/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2305/include/unit/timex.h
@@ -11,10 +11,6 @@
#ifndef _ASM_UNIT_TIMEX_H
#define _ASM_UNIT_TIMEX_H
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
#include <asm/timer-regs.h>
#include <unit/clock.h>
#include <asm/param.h>
diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c
index e1becd6b7571..bc4adfaf815c 100644
--- a/arch/mn10300/unit-asb2305/unit-init.c
+++ b/arch/mn10300/unit-asb2305/unit-init.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/io.h>
+#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/intctl-regs.h>
diff --git a/arch/mn10300/unit-asb2364/include/unit/timex.h b/arch/mn10300/unit-asb2364/include/unit/timex.h
index ddb7ed010706..42f32db75087 100644
--- a/arch/mn10300/unit-asb2364/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2364/include/unit/timex.h
@@ -11,10 +11,6 @@
#ifndef _ASM_UNIT_TIMEX_H
#define _ASM_UNIT_TIMEX_H
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
#include <asm/timer-regs.h>
#include <unit/clock.h>
#include <asm/param.h>
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index dbc3850b1d0d..5707f1a62341 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -21,6 +21,7 @@ KBUILD_DEFCONFIG := default_defconfig
NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1
+LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
MACHINE := $(shell uname -m)
ifeq ($(MACHINE),parisc*)
@@ -79,7 +80,7 @@ kernel-y := mm/ kernel/ math-emu/
kernel-$(CONFIG_HPUX) += hpux/
core-y += $(addprefix arch/parisc/, $(kernel-y))
-libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
+libs-y += arch/parisc/lib/ $(LIBGCC)
drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 19a434f55059..4383707d9801 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,3 +1,4 @@
include include/asm-generic/Kbuild.asm
header-y += pdc.h
+generic-y += word-at-a-time.h
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 72cfdb0cfdd1..62a33338549c 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -1,6 +1,8 @@
#ifndef _PARISC_BUG_H
#define _PARISC_BUG_H
+#include <linux/kernel.h> /* for BUGFLAG_TAINT */
+
/*
* Tell the user there is some problem.
* The offending file and line are encoded in the __bug_table section.
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index c9aac24b02e2..0554ab062bdc 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
}
#ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
@@ -100,6 +100,14 @@ static inline void hard_irq_disable(void)
get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
}
+/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
+#define hard_irq_disable hard_irq_disable
+
+static inline bool lazy_irq_pending(void)
+{
+ return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
/*
* This is called by asynchronous interrupts to conditionally
* re-enable hard interrupts when soft-disabled after having
@@ -117,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !regs->softe;
}
+extern bool prep_irq_for_idle(void);
+
#else /* CONFIG_PPC64 */
#define SET_MSR_EE(x) mtmsr(x)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ed1718feb9d9..5971c85df136 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -558,27 +558,54 @@ _GLOBAL(ret_from_except_lite)
mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
-#ifdef CONFIG_PREEMPT
clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
- li r0,_TIF_NEED_RESCHED /* bits to check */
ld r3,_MSR(r1)
ld r4,TI_FLAGS(r9)
- /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
- rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
- and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
- bne do_work
-
-#else /* !CONFIG_PREEMPT */
- ld r3,_MSR(r1) /* Returning to user mode? */
andi. r3,r3,MSR_PR
- beq restore /* if not, just restore regs and return */
+ beq resume_kernel
/* Check current_thread_info()->flags */
+ andi. r0,r4,_TIF_USER_WORK_MASK
+ beq restore
+
+ andi. r0,r4,_TIF_NEED_RESCHED
+ beq 1f
+ bl .restore_interrupts
+ bl .schedule
+ b .ret_from_except_lite
+
+1: bl .save_nvgprs
+ bl .restore_interrupts
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_notify_resume
+ b .ret_from_except
+
+resume_kernel:
+#ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
+ andi. r0,r4,_TIF_NEED_RESCHED
+ beq+ restore
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+ lwz r8,TI_PREEMPT(r9)
+ cmpwi cr1,r8,0
+ ld r0,SOFTE(r1)
+ cmpdi r0,0
+ crandc eq,cr1*4+eq,eq
+ bne restore
+
+ /*
+ * Here we are preempting the current task. We want to make
+ * sure we are soft-disabled first
+ */
+ SOFT_DISABLE_INTS(r3,r4)
+1: bl .preempt_schedule_irq
+
+ /* Re-test flags and eventually loop */
clrrdi r9,r1,THREAD_SHIFT
ld r4,TI_FLAGS(r9)
- andi. r0,r4,_TIF_USER_WORK_MASK
- bne do_work
-#endif /* !CONFIG_PREEMPT */
+ andi. r0,r4,_TIF_NEED_RESCHED
+ bne 1b
+#endif /* CONFIG_PREEMPT */
.globl fast_exc_return_irq
fast_exc_return_irq:
@@ -759,50 +786,6 @@ restore_check_irq_replay:
#endif /* CONFIG_PPC_BOOK3E */
1: b .ret_from_except /* What else to do here ? */
-
-
-3:
-do_work:
-#ifdef CONFIG_PREEMPT
- andi. r0,r3,MSR_PR /* Returning to user mode? */
- bne user_work
- /* Check that preempt_count() == 0 and interrupts are enabled */
- lwz r8,TI_PREEMPT(r9)
- cmpwi cr1,r8,0
- ld r0,SOFTE(r1)
- cmpdi r0,0
- crandc eq,cr1*4+eq,eq
- bne restore
-
- /*
- * Here we are preempting the current task. We want to make
- * sure we are soft-disabled first
- */
- SOFT_DISABLE_INTS(r3,r4)
-1: bl .preempt_schedule_irq
-
- /* Re-test flags and eventually loop */
- clrrdi r9,r1,THREAD_SHIFT
- ld r4,TI_FLAGS(r9)
- andi. r0,r4,_TIF_NEED_RESCHED
- bne 1b
- b restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
-
- andi. r0,r4,_TIF_NEED_RESCHED
- beq 1f
- bl .restore_interrupts
- bl .schedule
- b .ret_from_except_lite
-
-1: bl .save_nvgprs
- bl .restore_interrupts
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_notify_resume
- b .ret_from_except
-
unrecov_restore:
addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7835a5e1ea5f..1f017bb7a7ce 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
*/
if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
__hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
else {
/*
* We should already be hard disabled here. We had bugs
@@ -277,7 +277,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
* NOTE: This is called with interrupts hard disabled but not marked
* as such in paca->irq_happened, so we need to resync this.
*/
-void restore_interrupts(void)
+void notrace restore_interrupts(void)
{
if (irqs_disabled()) {
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -286,6 +286,52 @@ void restore_interrupts(void)
__hard_irq_enable();
}
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+ /*
+ * First we need to hard disable to ensure no interrupt
+ * occurs before we effectively enter the low power state
+ */
+ hard_irq_disable();
+
+ /*
+ * If anything happened while we were soft-disabled,
+ * we return now and do not enter the low power state.
+ */
+ if (lazy_irq_pending())
+ return false;
+
+ /* Tell lockdep we are about to re-enable */
+ trace_hardirqs_on();
+
+ /*
+ * Mark interrupts as soft-enabled and clear the
+ * PACA_IRQ_HARD_DIS from the pending mask since we
+ * are about to hard enable as well as a side effect
+ * of entering the low power state.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ local_paca->soft_enabled = 1;
+
+ /* Tell the caller to enter the low power state */
+ return true;
+}
+
#endif /* CONFIG_PPC64 */
int arch_show_interrupts(struct seq_file *p, int prec)
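A minimal sketch (illustrative, not part of this patch) of the calling pattern prep_irq_for_idle() expects from a ppc_md.power_save hook; enter_low_power() is a hypothetical stand-in for whatever actually idles the core (nap, H_CEDE, ...):

static void example_power_save(void)
{
	/* Entered with interrupts soft-disabled, as ppc_md.power_save is. */
	if (!prep_irq_for_idle())
		return;		/* an interrupt is already pending: skip idle,
				   hard-disabled state is left properly marked */

	enter_low_power();	/* hypothetical; re-enables interrupts as a
				   side effect of entering the low power state */
}

The cbe_power_save and check_and_cede_processor conversions further down in this diff follow exactly this shape.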
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 0b6d79617d7b..2e3200ca485f 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
- if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
- && entry->jump[1] == 0x396b0000 + (val & 0xffff))
+ if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
+ && entry->jump[1] == 0x398c0000 + (val & 0xffff))
return 1;
return 0;
}
@@ -204,10 +204,9 @@ static uint32_t do_plt_call(void *location,
entry++;
}
- /* Stolen from Paul Mackerras as well... */
- entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */
- entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/
- entry->jump[2] = 0x7d6903a6; /* mtctr r11 */
+ entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
+ entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/
+ entry->jump[2] = 0x7d8903a6; /* mtctr r12 */
entry->jump[3] = 0x4e800420; /* bctr */
DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1b488e5305c5..0794a3017b1b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1312,7 +1312,7 @@ static struct opal_secondary_data {
extern char opal_secondary_entry;
-static void prom_query_opal(void)
+static void __init prom_query_opal(void)
{
long rc;
@@ -1436,7 +1436,7 @@ static void __init prom_opal_hold_cpus(void)
prom_debug("prom_opal_hold_cpus: end...\n");
}
-static void prom_opal_takeover(void)
+static void __init prom_opal_takeover(void)
{
struct opal_secondary_data *data = &RELOC(opal_secondary_data);
struct opal_takeover_args *args = &data->args;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 99a995c2a3f2..be171ee73bf8 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -475,6 +475,7 @@ void timer_interrupt(struct pt_regs * regs)
struct pt_regs *old_regs;
u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
struct clock_event_device *evt = &__get_cpu_var(decrementers);
+ u64 now;
/* Ensure a positive value is written to the decrementer, or else
* some CPUs will continue to take decrementer exceptions.
@@ -509,9 +510,16 @@ void timer_interrupt(struct pt_regs * regs)
irq_work_run();
}
- *next_tb = ~(u64)0;
- if (evt->event_handler)
- evt->event_handler(evt);
+ now = get_tb_or_rtc();
+ if (now >= *next_tb) {
+ *next_tb = ~(u64)0;
+ if (evt->event_handler)
+ evt->event_handler(evt);
+ } else {
+ now = *next_tb - now;
+ if (now <= DECREMENTER_MAX)
+ set_dec((int)now);
+ }
#ifdef CONFIG_PPC64
/* collect purr register values often, for accurate calculations */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c6af1d623839..3abe1b86e583 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -268,24 +268,45 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
return err;
}
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
+ struct kvm *kvm = vcpu->kvm;
void *va;
unsigned long nb;
+ unsigned long gpa;
- vpap->update_pending = 0;
- va = NULL;
- if (vpap->next_gpa) {
- va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
- if (nb < vpap->len) {
- /*
- * If it's now too short, it must be that userspace
- * has changed the mappings underlying guest memory,
- * so unregister the region.
- */
+ /*
+ * We need to pin the page pointed to by vpap->next_gpa,
+ * but we can't call kvmppc_pin_guest_page under the lock
+ * as it does get_user_pages() and down_read(). So we
+ * have to drop the lock, pin the page, then get the lock
+ * again and check that a new area didn't get registered
+ * in the meantime.
+ */
+ for (;;) {
+ gpa = vpap->next_gpa;
+ spin_unlock(&vcpu->arch.vpa_update_lock);
+ va = NULL;
+ nb = 0;
+ if (gpa)
+ va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ if (gpa == vpap->next_gpa)
+ break;
+ /* sigh... unpin that one and try again */
+ if (va)
kvmppc_unpin_guest_page(kvm, va);
- va = NULL;
- }
+ }
+
+ vpap->update_pending = 0;
+ if (va && nb < vpap->len) {
+ /*
+ * If it's now too short, it must be that userspace
+ * has changed the mappings underlying guest memory,
+ * so unregister the region.
+ */
+ kvmppc_unpin_guest_page(kvm, va);
+ va = NULL;
}
if (vpap->pinned_addr)
kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = vcpu->kvm;
-
spin_lock(&vcpu->arch.vpa_update_lock);
if (vcpu->arch.vpa.update_pending) {
- kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+ kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
}
if (vcpu->arch.dtl.update_pending) {
- kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+ kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
vcpu->arch.dtl_index = 0;
}
if (vcpu->arch.slb_shadow.update_pending)
- kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+ kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
spin_unlock(&vcpu->arch.vpa_update_lock);
}
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
struct kvm_vcpu *vcpu, *vcpu0, *vnext;
long ret;
u64 now;
- int ptid, i;
+ int ptid, i, need_vpa_update;
/* don't start if any threads have a signal pending */
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ need_vpa_update = 0;
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
if (signal_pending(vcpu->arch.run_task))
return 0;
+ need_vpa_update |= vcpu->arch.vpa.update_pending |
+ vcpu->arch.slb_shadow.update_pending |
+ vcpu->arch.dtl.update_pending;
+ }
+
+ /*
+ * Initialize *vc, in particular vc->vcore_state, so we can
+ * drop the vcore lock if necessary.
+ */
+ vc->n_woken = 0;
+ vc->nap_count = 0;
+ vc->entry_exit_count = 0;
+ vc->vcore_state = VCORE_RUNNING;
+ vc->in_guest = 0;
+ vc->napping_threads = 0;
+
+ /*
+ * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+ * which can't be called with any spinlocks held.
+ */
+ if (need_vpa_update) {
+ spin_unlock(&vc->lock);
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ kvmppc_update_vpas(vcpu);
+ spin_lock(&vc->lock);
+ }
/*
* Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
if (vcpu->arch.ceded)
vcpu->arch.ptid = ptid++;
- vc->n_woken = 0;
- vc->nap_count = 0;
- vc->entry_exit_count = 0;
- vc->vcore_state = VCORE_RUNNING;
vc->stolen_tb += mftb() - vc->preempt_tb;
- vc->in_guest = 0;
vc->pcpu = smp_processor_id();
- vc->napping_threads = 0;
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
kvmppc_start_thread(vcpu);
- if (vcpu->arch.vpa.update_pending ||
- vcpu->arch.slb_shadow.update_pending ||
- vcpu->arch.dtl.update_pending)
- kvmppc_update_vpas(vcpu);
kvmppc_create_dtl_entry(vcpu, vc);
}
/* Grab any remaining hw threads so they can't go into the kernel */
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a84aafce2a12..a1044f43becd 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
lwz r3,VCORE_NAPPING_THREADS(r5)
lwz r4,VCPU_PTID(r9)
li r0,1
- sldi r0,r0,r4
+ sld r0,r0,r4
andc. r3,r3,r0 /* no sense IPI'ing ourselves */
beq 43f
mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 3ff9013d6e79..ee02b30878ed 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -241,6 +241,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
case H_PUT_TCE:
return kvmppc_h_pr_put_tce(vcpu);
case H_CEDE:
+ vcpu->arch.shared->msr |= MSR_EE;
kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b6edbb3b4a54..1e95556dc692 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -635,11 +635,11 @@ static inline int __init read_usm_ranges(const u32 **usm)
*/
static void __init parse_drconf_memory(struct device_node *memory)
{
- const u32 *dm, *usm;
+ const u32 *uninitialized_var(dm), *usm;
unsigned int n, rc, ranges, is_kexec_kdump = 0;
unsigned long lmb_size, base, size, sz;
int nid;
- struct assoc_arrays aa;
+ struct assoc_arrays aa = { .arrays = NULL };
n = of_get_drconf_memory(memory, &dm);
if (!n)
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index 55ba3855a97f..7d3a3b5619a2 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -105,6 +105,7 @@ sk_load_byte_msh_positive_offset:
mr r4, r_addr; \
li r6, SIZE; \
bl skb_copy_bits; \
+ nop; \
/* R3 = 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
ld r0, 16(r1); \
@@ -156,6 +157,7 @@ bpf_slow_path_byte_msh:
mr r4, r_addr; \
li r5, SIZE; \
bl bpf_internal_load_pointer_neg_helper; \
+ nop; \
/* R3 != 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
ld r0, 16(r1); \
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index efdacc829576..d17e98bc0c10 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
{
unsigned long ctrl, thread_switch_control;
- /*
- * We need to hard disable interrupts, the local_irq_enable() done by
- * our caller upon return will hard re-enable.
- */
- hard_irq_disable();
+ /* Ensure our interrupt state is properly tracked */
+ if (!prep_irq_for_idle())
+ return;
ctrl = mfspr(SPRN_CTRLF);
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
*/
ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
mtspr(SPRN_CTRLT, ctrl);
+
+ /* Re-enable interrupts in MSR */
+ __hard_irq_enable();
}
static int cbe_system_reset_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 0915b1ad66ce..2d311c0caf8e 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -106,7 +106,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
tcep++;
}
- if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+ if (tbl->it_type & TCE_PCI_SWINV_CREATE)
tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
return 0;
}
@@ -121,7 +121,7 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
while (npages--)
*(tcep++) = 0;
- if (tbl->it_type == TCE_PCI_SWINV_FREE)
+ if (tbl->it_type & TCE_PCI_SWINV_FREE)
tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
}
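A small illustration (assuming TCE_PCI is the base table-type bit, as elsewhere in the powerpc iommu code) of why the equality test never fired once any other type bit was set:

	unsigned long it_type = TCE_PCI | TCE_PCI_SWINV_CREATE;

	if (it_type == TCE_PCI_SWINV_CREATE)
		/* false: the TCE_PCI bit makes the comparison fail */ ;
	if (it_type & TCE_PCI_SWINV_CREATE)
		/* true whenever the flag is present, regardless of other
		   bits -- the software invalidate actually runs */ ;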
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 36f957f31842..8733a86ad52e 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -68,9 +68,7 @@ static const char *pseries_nvram_os_partitions[] = {
};
static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason,
- const char *old_msgs, unsigned long old_len,
- const char *new_msgs, unsigned long new_len);
+ enum kmsg_dump_reason reason);
static struct kmsg_dumper nvram_kmsg_dumper = {
.dump = oops_to_nvram
@@ -504,28 +502,6 @@ int __init pSeries_nvram_init(void)
}
/*
- * Try to capture the last capture_len bytes of the printk buffer. Return
- * the amount actually captured.
- */
-static size_t capture_last_msgs(const char *old_msgs, size_t old_len,
- const char *new_msgs, size_t new_len,
- char *captured, size_t capture_len)
-{
- if (new_len >= capture_len) {
- memcpy(captured, new_msgs + (new_len - capture_len),
- capture_len);
- return capture_len;
- } else {
- /* Grab the end of old_msgs. */
- size_t old_tail_len = min(old_len, capture_len - new_len);
- memcpy(captured, old_msgs + (old_len - old_tail_len),
- old_tail_len);
- memcpy(captured + old_tail_len, new_msgs, new_len);
- return old_tail_len + new_len;
- }
-}
-
-/*
* Are we using the ibm,rtas-log for oops/panic reports? And if so,
* would logging this oops/panic overwrite an RTAS event that rtas_errd
* hasn't had a chance to read and process? Return 1 if so, else 0.
@@ -541,27 +517,6 @@ static int clobbering_unread_rtas_event(void)
NVRAM_RTAS_READ_TIMEOUT);
}
-/* Squeeze out each line's <n> severity prefix. */
-static size_t elide_severities(char *buf, size_t len)
-{
- char *in, *out, *buf_end = buf + len;
- /* Assume a <n> at the very beginning marks the start of a line. */
- int newline = 1;
-
- in = out = buf;
- while (in < buf_end) {
- if (newline && in+3 <= buf_end &&
- *in == '<' && isdigit(in[1]) && in[2] == '>') {
- in += 3;
- newline = 0;
- } else {
- newline = (*in == '\n');
- *out++ = *in++;
- }
- }
- return out - buf;
-}
-
/* Derived from logfs_compress() */
static int nvram_compress(const void *in, void *out, size_t inlen,
size_t outlen)
@@ -619,9 +574,7 @@ static int zip_oops(size_t text_len)
* partition. If that's too much, go back and capture uncompressed text.
*/
static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason,
- const char *old_msgs, unsigned long old_len,
- const char *new_msgs, unsigned long new_len)
+ enum kmsg_dump_reason reason)
{
static unsigned int oops_count = 0;
static bool panicking = false;
@@ -660,14 +613,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
return;
if (big_oops_buf) {
- text_len = capture_last_msgs(old_msgs, old_len,
- new_msgs, new_len, big_oops_buf, big_oops_buf_sz);
- text_len = elide_severities(big_oops_buf, text_len);
+ kmsg_dump_get_buffer(dumper, false,
+ big_oops_buf, big_oops_buf_sz, &text_len);
rc = zip_oops(text_len);
}
if (rc != 0) {
- text_len = capture_last_msgs(old_msgs, old_len,
- new_msgs, new_len, oops_data, oops_data_sz);
+ kmsg_dump_rewind(dumper);
+ kmsg_dump_get_buffer(dumper, true,
+ oops_data, oops_data_sz, &text_len);
err_type = ERR_TYPE_KERNEL_PANIC;
*oops_len = (u16) text_len;
}
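As a reference for the interface change above, a minimal sketch of the pull-style kmsg_dumper callback this converts to; everything except the kmsg_dump_* helpers is a hypothetical name:

static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	static char buf[4096];
	size_t len;

	/* Pull the most recent messages out of the kernel log buffer. */
	kmsg_dump_get_buffer(dumper, false, buf, sizeof(buf), &len);
	/* ... write buf[0..len) to persistent storage ... */
}

static struct kmsg_dumper example_dumper = {
	.dump	= example_dump,
};
/* registered once at init time with kmsg_dump_register(&example_dumper) */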
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 41a34bc4a9a2..c71be66bd5dc 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -99,15 +99,18 @@ out:
static void check_and_cede_processor(void)
{
/*
- * Interrupts are soft-disabled at this point,
- * but not hard disabled. So an interrupt might have
- * occurred before entering NAP, and would be potentially
- * lost (edge events, decrementer events, etc...) unless
- * we first hard disable then check.
+ * Ensure our interrupt state is properly tracked,
+ * also checks if no interrupt has occurred while we
+ * were soft-disabled
*/
- hard_irq_disable();
- if (get_paca()->irq_happened == 0)
+ if (prep_irq_for_idle()) {
cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Ensure that H_CEDE returns with IRQs on */
+ if (WARN_ON(!(mfmsr() & MSR_EE)))
+ __hard_irq_enable();
+#endif
+ }
}
static int dedicated_cede_loop(struct cpuidle_device *dev,
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0f3ab06d2222..eab3492a45c5 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
/* print cpus waiting or in xmon */
printf("cpus stopped:");
count = 0;
- for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+ for_each_possible_cpu(cpu) {
if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
if (count == 0)
printf(" %x", cpu);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 99bcd0ee838d..31d9db7913e4 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -32,6 +32,8 @@ config SUPERH
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 46edf070da1c..aed701c7b11b 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -9,6 +9,12 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
+ifneq ($(SUBARCH),$(ARCH))
+ ifeq ($(CROSS_COMPILE),)
+ CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
+ endif
+endif
+
isa-y := any
isa-$(CONFIG_SH_DSP) := sh
isa-$(CONFIG_CPU_SH2) := sh2
@@ -106,19 +112,13 @@ LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
KBUILD_DEFCONFIG := cayman_defconfig
endif
-ifneq ($(SUBARCH),$(ARCH))
- ifeq ($(CROSS_COMPILE),)
- CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
- endif
-endif
-
ifdef CONFIG_CPU_LITTLE_ENDIAN
ld-bfd := elf32-$(UTS_MACHINE)-linux
-LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd)
+LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
LDFLAGS += -EL
else
ld-bfd := elf32-$(UTS_MACHINE)big-linux
-LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd)
+LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
LDFLAGS += -EB
endif
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 158c9176e42a..43a179ce9afc 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -201,8 +201,8 @@ static struct resource kfr2r09_usb0_gadget_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = evtirq(0xa20),
- .end = evtirq(0xa20),
+ .start = evt2irq(0xa20),
+ .end = evt2irq(0xa20),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index c045142f7338..9e702f2f8045 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -239,7 +239,7 @@ static int __init pcie_clk_init(struct sh7786_pcie_port *port)
clk->enable_reg = (void __iomem *)(chan->reg_base + SH4A_PCIEPHYCTLR);
clk->enable_bit = BITS_CKE;
- ret = sh_clk_mstp32_register(clk, 1);
+ ret = sh_clk_mstp_register(clk, 1);
if (unlikely(ret < 0))
goto err_phy;
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 7beb42322f60..7b673ddcd555 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,5 +1,39 @@
include include/asm-generic/Kbuild.asm
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += siginfo.h
+generic-y += sizes.h
+generic-y += socket.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += ucontext.h
+generic-y += xor.h
+
header-y += cachectl.h
header-y += cpu-features.h
header-y += hw_breakpoint.h
diff --git a/arch/sh/include/asm/bitsperlong.h b/arch/sh/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/sh/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/sh/include/asm/cputime.h b/arch/sh/include/asm/cputime.h
deleted file mode 100644
index 6ca395d1393e..000000000000
--- a/arch/sh/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SH_CPUTIME_H
-#define __SH_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __SH_CPUTIME_H */
diff --git a/arch/sh/include/asm/current.h b/arch/sh/include/asm/current.h
deleted file mode 100644
index 4c51401b5537..000000000000
--- a/arch/sh/include/asm/current.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/current.h>
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h
deleted file mode 100644
index 9670e127b7b2..000000000000
--- a/arch/sh/include/asm/delay.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/delay.h>
diff --git a/arch/sh/include/asm/div64.h b/arch/sh/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/sh/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/sh/include/asm/emergency-restart.h b/arch/sh/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/arch/sh/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sh/include/asm/errno.h b/arch/sh/include/asm/errno.h
deleted file mode 100644
index 51cf6f9cebb8..000000000000
--- a/arch/sh/include/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_ERRNO_H
-#define __ASM_SH_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* __ASM_SH_ERRNO_H */
diff --git a/arch/sh/include/asm/fcntl.h b/arch/sh/include/asm/fcntl.h
deleted file mode 100644
index 46ab12db5739..000000000000
--- a/arch/sh/include/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
diff --git a/arch/sh/include/asm/io_noioport.h b/arch/sh/include/asm/io_noioport.h
index e136d28d1d2e..4d48f1436a63 100644
--- a/arch/sh/include/asm/io_noioport.h
+++ b/arch/sh/include/asm/io_noioport.h
@@ -19,9 +19,20 @@ static inline u32 inl(unsigned long addr)
return -1;
}
-#define outb(x, y) BUG()
-#define outw(x, y) BUG()
-#define outl(x, y) BUG()
+static inline void outb(unsigned char x, unsigned long port)
+{
+ BUG();
+}
+
+static inline void outw(unsigned short x, unsigned long port)
+{
+ BUG();
+}
+
+static inline void outl(unsigned int x, unsigned long port)
+{
+ BUG();
+}
#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
diff --git a/arch/sh/include/asm/ioctl.h b/arch/sh/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/sh/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/sh/include/asm/ipcbuf.h b/arch/sh/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/sh/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/sh/include/asm/irq_regs.h b/arch/sh/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/sh/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/sh/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/sh/include/asm/local.h b/arch/sh/include/asm/local.h
deleted file mode 100644
index 9ed9b9cb459a..000000000000
--- a/arch/sh/include/asm/local.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SH_LOCAL_H
-#define __ASM_SH_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* __ASM_SH_LOCAL_H */
-
diff --git a/arch/sh/include/asm/local64.h b/arch/sh/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/sh/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/sh/include/asm/mman.h b/arch/sh/include/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/sh/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/sh/include/asm/msgbuf.h b/arch/sh/include/asm/msgbuf.h
deleted file mode 100644
index 809134c644a6..000000000000
--- a/arch/sh/include/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/msgbuf.h>
diff --git a/arch/sh/include/asm/param.h b/arch/sh/include/asm/param.h
deleted file mode 100644
index 965d45427975..000000000000
--- a/arch/sh/include/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/sh/include/asm/parport.h b/arch/sh/include/asm/parport.h
deleted file mode 100644
index cf252af64590..000000000000
--- a/arch/sh/include/asm/parport.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/parport.h>
diff --git a/arch/sh/include/asm/percpu.h b/arch/sh/include/asm/percpu.h
deleted file mode 100644
index 4db4b39a4399..000000000000
--- a/arch/sh/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARCH_SH_PERCPU
-#define __ARCH_SH_PERCPU
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ARCH_SH_PERCPU */
diff --git a/arch/sh/include/asm/poll.h b/arch/sh/include/asm/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/arch/sh/include/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/sh/include/asm/resource.h b/arch/sh/include/asm/resource.h
deleted file mode 100644
index 9c2499a86ec0..000000000000
--- a/arch/sh/include/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_RESOURCE_H
-#define __ASM_SH_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* __ASM_SH_RESOURCE_H */
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
deleted file mode 100644
index 98dfc3510f10..000000000000
--- a/arch/sh/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_SCATTERLIST_H
-#define __ASM_SH_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* __ASM_SH_SCATTERLIST_H */
diff --git a/arch/sh/include/asm/sembuf.h b/arch/sh/include/asm/sembuf.h
deleted file mode 100644
index 7673b83cfef7..000000000000
--- a/arch/sh/include/asm/sembuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sembuf.h>
diff --git a/arch/sh/include/asm/serial.h b/arch/sh/include/asm/serial.h
deleted file mode 100644
index a0cb0caff152..000000000000
--- a/arch/sh/include/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
diff --git a/arch/sh/include/asm/shmbuf.h b/arch/sh/include/asm/shmbuf.h
deleted file mode 100644
index 83c05fc2de38..000000000000
--- a/arch/sh/include/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmbuf.h>
diff --git a/arch/sh/include/asm/siginfo.h b/arch/sh/include/asm/siginfo.h
deleted file mode 100644
index 813040ed68a9..000000000000
--- a/arch/sh/include/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_SIGINFO_H
-#define __ASM_SH_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif /* __ASM_SH_SIGINFO_H */
diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h
deleted file mode 100644
index dd248c2e1085..000000000000
--- a/arch/sh/include/asm/sizes.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sizes.h>
diff --git a/arch/sh/include/asm/socket.h b/arch/sh/include/asm/socket.h
deleted file mode 100644
index 6b71384b9d8b..000000000000
--- a/arch/sh/include/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
diff --git a/arch/sh/include/asm/statfs.h b/arch/sh/include/asm/statfs.h
deleted file mode 100644
index 9202a023328f..000000000000
--- a/arch/sh/include/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_STATFS_H
-#define __ASM_SH_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* __ASM_SH_STATFS_H */
diff --git a/arch/sh/include/asm/termbits.h b/arch/sh/include/asm/termbits.h
deleted file mode 100644
index 3935b106de79..000000000000
--- a/arch/sh/include/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termbits.h>
diff --git a/arch/sh/include/asm/termios.h b/arch/sh/include/asm/termios.h
deleted file mode 100644
index 280d78a9d966..000000000000
--- a/arch/sh/include/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termios.h>
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 050f221fa898..8698a80ed00c 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -25,6 +25,8 @@
(__chk_user_ptr(addr), \
__access_ok((unsigned long __force)(addr), (size)))
+#define user_addr_max() (current_thread_info()->addr_limit.seg)
+
/*
* Uh, these should become the main single-value transfer routines ...
* They automatically use the right size if we just have the right
@@ -100,6 +102,11 @@ struct __large_struct { unsigned long buf[100]; };
# include "uaccess_64.h"
#endif
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
/* Generic arbitrary sized copy. */
/* Return the number of bytes NOT copied */
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
@@ -137,37 +144,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
__cl_size; \
})
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-#define strncpy_from_user(dest,src,count) \
-({ \
- unsigned long __sfu_src = (unsigned long)(src); \
- int __sfu_count = (int)(count); \
- long __sfu_res = -EFAULT; \
- \
- if (__access_ok(__sfu_src, __sfu_count)) \
- __sfu_res = __strncpy_from_user((unsigned long)(dest), \
- __sfu_src, __sfu_count); \
- \
- __sfu_res; \
-})
-
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
@@ -192,43 +168,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
return __copy_size;
}
-/**
- * strnlen_user: - Get the size of a string in user space.
- * @s: The string to measure.
- * @n: The maximum valid length
- *
- * Context: User context only. This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-static inline long strnlen_user(const char __user *s, long n)
-{
- if (!__addr_ok(s))
- return 0;
- else
- return __strnlen_user(s, n);
-}
-
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
-
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
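The prototypes exported above are satisfied by the generic lib/strncpy_from_user.c and lib/strnlen_user.c selected through GENERIC_STRNCPY_FROM_USER / GENERIC_STRNLEN_USER in the Kconfig hunk earlier; they bound the scan with the new user_addr_max() and step a word at a time using the helpers added below. Callers are unchanged; a typical use (uptr being any __user pointer) is:

	char name[64];
	long n = strncpy_from_user(name, uptr, sizeof(name));

	if (n < 0)
		return n;		/* -EFAULT: faulting user pointer */
	if (n == sizeof(name))
		/* source was longer than the buffer: the copy was
		   truncated to count bytes and is not NUL-terminated */ ;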
diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h
index ae0d24f6653f..c0de7ee35ab7 100644
--- a/arch/sh/include/asm/uaccess_32.h
+++ b/arch/sh/include/asm/uaccess_32.h
@@ -170,79 +170,4 @@ __asm__ __volatile__( \
extern void __put_user_unknown(void);
-static inline int
-__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
-{
- __kernel_size_t res;
- unsigned long __dummy, _d, _s, _c;
-
- __asm__ __volatile__(
- "9:\n"
- "mov.b @%2+, %1\n\t"
- "cmp/eq #0, %1\n\t"
- "bt/s 2f\n"
- "1:\n"
- "mov.b %1, @%3\n\t"
- "dt %4\n\t"
- "bf/s 9b\n\t"
- " add #1, %3\n\t"
- "2:\n\t"
- "sub %4, %0\n"
- "3:\n"
- ".section .fixup,\"ax\"\n"
- "4:\n\t"
- "mov.l 5f, %1\n\t"
- "jmp @%1\n\t"
- " mov %9, %0\n\t"
- ".balign 4\n"
- "5: .long 3b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .balign 4\n"
- " .long 9b,4b\n"
- ".previous"
- : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c)
- : "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
- "i" (-EFAULT)
- : "memory", "t");
-
- return res;
-}
-
-/*
- * Return the size of a string (including the ending 0 even when we have
- * exceeded the maximum string length).
- */
-static inline long __strnlen_user(const char __user *__s, long __n)
-{
- unsigned long res;
- unsigned long __dummy;
-
- __asm__ __volatile__(
- "1:\t"
- "mov.b @(%0,%3), %1\n\t"
- "cmp/eq %4, %0\n\t"
- "bt/s 2f\n\t"
- " add #1, %0\n\t"
- "tst %1, %1\n\t"
- "bf 1b\n\t"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3:\n\t"
- "mov.l 4f, %1\n\t"
- "jmp @%1\n\t"
- " mov #0, %0\n"
- ".balign 4\n"
- "4: .long 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .balign 4\n"
- " .long 1b,3b\n"
- ".previous"
- : "=z" (res), "=&r" (__dummy)
- : "0" (0), "r" (__s), "r" (__n)
- : "t");
- return res;
-}
-
#endif /* __ASM_SH_UACCESS_32_H */
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
index 56fd20b8cdcc..2e07e0f40c6a 100644
--- a/arch/sh/include/asm/uaccess_64.h
+++ b/arch/sh/include/asm/uaccess_64.h
@@ -84,8 +84,4 @@ extern long __put_user_asm_l(void *, long);
extern long __put_user_asm_q(void *, long);
extern void __put_user_unknown(void);
-extern long __strnlen_user(const char *__s, long __n);
-extern int __strncpy_from_user(unsigned long __dest,
- unsigned long __user __src, int __count);
-
#endif /* __ASM_SH_UACCESS_64_H */
diff --git a/arch/sh/include/asm/ucontext.h b/arch/sh/include/asm/ucontext.h
deleted file mode 100644
index 9bc07b9f30fb..000000000000
--- a/arch/sh/include/asm/ucontext.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
diff --git a/arch/sh/include/asm/word-at-a-time.h b/arch/sh/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..6e38953ff7fd
--- /dev/null
+++ b/arch/sh/include/asm/word-at-a-time.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_SH_WORD_AT_A_TIME_H
+#define __ASM_SH_WORD_AT_A_TIME_H
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+# include <asm-generic/word-at-a-time.h>
+#else
+/*
+ * Little-endian version cribbed from x86.
+ */
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+#endif
+
+#endif
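A worked example (my own arithmetic, for the little-endian 32-bit case in the #else branch above) for a word loaded from the bytes 'a', 'b', '\0', 'c':

	/*
	 * a = 0x63006261
	 * has_zero():          ((a - 0x01010101) & ~a) & 0x80808080 = 0x00800000
	 *                      (the high bit of the NUL byte)
	 * create_zero_mask():  ((0x00800000 - 1) & ~0x00800000) >> 7 = 0x0000ffff
	 *                      so zero_bytemask() keeps exactly the bytes "ab"
	 * find_zero():         count_masked_bytes(0x0000ffff) = 2
	 *                      the NUL is reported at byte offset 2
	 */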
diff --git a/arch/sh/include/asm/xor.h b/arch/sh/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b18..000000000000
--- a/arch/sh/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h
deleted file mode 100644
index 1192e1c761a7..000000000000
--- a/arch/sh/include/cpu-sh2a/cpu/ubc.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * SH-2A UBC definitions
- *
- * Copyright (C) 2008 Kieran Bingham
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef __ASM_CPU_SH2A_UBC_H
-#define __ASM_CPU_SH2A_UBC_H
-
-#define UBC_BARA 0xfffc0400
-#define UBC_BAMRA 0xfffc0404
-#define UBC_BBRA 0xfffc04a0 /* 16 bit access */
-#define UBC_BDRA 0xfffc0408
-#define UBC_BDMRA 0xfffc040c
-
-#define UBC_BARB 0xfffc0410
-#define UBC_BAMRB 0xfffc0414
-#define UBC_BBRB 0xfffc04b0 /* 16 bit access */
-#define UBC_BDRB 0xfffc0418
-#define UBC_BDMRB 0xfffc041c
-
-#define UBC_BRCR 0xfffc04c0
-
-#endif /* __ASM_CPU_SH2A_UBC_H */
diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7720.c b/arch/sh/kernel/cpu/sh3/serial-sh7720.c
index 8832c526cdf9..c4a0336660dd 100644
--- a/arch/sh/kernel/cpu/sh3/serial-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/serial-sh7720.c
@@ -2,7 +2,7 @@
#include <linux/serial_core.h>
#include <linux/io.h>
#include <cpu/serial.h>
-#include <asm/gpio.h>
+#include <cpu/gpio.h>
static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
{
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
index ea01a72f1b94..53638e231cd0 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -283,7 +283,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
index 7ac07b4f75de..22e485d1990b 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
@@ -276,7 +276,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index 8e1f97010c0d..c4cb740e4d10 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -261,7 +261,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+ ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index 35f75cf0c7e5..37c41c7747a3 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -311,7 +311,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+ ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 2a87901673fe..c87e78f73234 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -375,7 +375,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+ ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c
index 1697642c1f73..deb683abacf0 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c
@@ -260,7 +260,7 @@ int __init arch_clk_init(void)
&div4_table);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
index 04ab5aeaf920..e84a43229b9c 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -148,7 +148,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
index ab1c58f2d101..1c83788db76a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
@@ -175,7 +175,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
index 491709483e10..8bba6f159023 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
@@ -194,7 +194,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
index 0f11b392bf46..a9422dab0ce7 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
@@ -149,7 +149,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index ff1f0e6e9bec..b7cf6a547f11 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -1569,86 +1569,6 @@ ___clear_user_exit:
#endif /* CONFIG_MMU */
/*
- * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
- * int __count)
- *
- * Inputs:
- * (r2) target address
- * (r3) source address
- * (r4) maximum size in bytes
- *
- * Outputs:
- * (*r2) copied data
- * (r2) -EFAULT (in case of faulting)
- * copied data (otherwise)
- */
- .global __strncpy_from_user
-__strncpy_from_user:
- pta ___strncpy_from_user1, tr0
- pta ___strncpy_from_user_done, tr1
- or r4, ZERO, r5 /* r5 = original count */
- beq/u r4, r63, tr1 /* early exit if r4==0 */
- movi -(EFAULT), r6 /* r6 = reply, no real fixup */
- or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
-
-___strncpy_from_user1:
- ld.b r3, 0, r7 /* Fault address: only in reading */
- st.b r2, 0, r7
- addi r2, 1, r2
- addi r3, 1, r3
- beq/u ZERO, r7, tr1
- addi r4, -1, r4 /* return real number of copied bytes */
- bne/l ZERO, r4, tr0
-
-___strncpy_from_user_done:
- sub r5, r4, r6 /* If done, return copied */
-
-___strncpy_from_user_exit:
- or r6, ZERO, r2
- ptabs LINK, tr0
- blink tr0, ZERO
-
-/*
- * extern long __strnlen_user(const char *__s, long __n)
- *
- * Inputs:
- * (r2) source address
- * (r3) source size in bytes
- *
- * Outputs:
- * (r2) -EFAULT (in case of faulting)
- * string length (otherwise)
- */
- .global __strnlen_user
-__strnlen_user:
- pta ___strnlen_user_set_reply, tr0
- pta ___strnlen_user1, tr1
- or ZERO, ZERO, r5 /* r5 = counter */
- movi -(EFAULT), r6 /* r6 = reply, no real fixup */
- or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
- beq r3, ZERO, tr0
-
-___strnlen_user1:
- ldx.b r2, r5, r7 /* Fault address: only in reading */
- addi r3, -1, r3 /* No real fixup */
- addi r5, 1, r5
- beq r3, ZERO, tr0
- bne r7, ZERO, tr1
-! The line below used to be active. It led to a junk byte lying between each pair
-! of entries in the argv & envp structures in memory. Whilst the program saw the right data
-! via the argv and envp arguments to main, it meant the 'flat' representation visible through
-! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
-! addi r5, 1, r5 /* Include '\0' */
-
-___strnlen_user_set_reply:
- or r5, ZERO, r6 /* If done, return counter */
-
-___strnlen_user_exit:
- or r6, ZERO, r2
- ptabs LINK, tr0
- blink tr0, ZERO
-
-/*
* extern long __get_user_asm_?(void *val, long addr)
*
* Inputs:
@@ -1982,8 +1902,6 @@ asm_uaccess_start:
.long ___copy_user2, ___copy_user_exit
.long ___clear_user1, ___clear_user_exit
#endif
- .long ___strncpy_from_user1, ___strncpy_from_user_exit
- .long ___strnlen_user1, ___strnlen_user_exit
.long ___get_user_asm_b1, ___get_user_asm_b_exit
.long ___get_user_asm_w1, ___get_user_asm_w_exit
.long ___get_user_asm_l1, ___get_user_asm_l_exit
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 9b7a459a4613..055d91b70305 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/stackprotector.h>
+#include <asm/fpu.h>
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 4264583eabac..602545b12a86 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -33,6 +33,7 @@
#include <asm/switch_to.h>
struct task_struct *last_task_used_math = NULL;
+struct pt_regs fake_swapper_regs = { 0, };
void show_regs(struct pt_regs *regs)
{
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index 45afa5c51f67..26a0774f5272 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -32,8 +32,6 @@ EXPORT_SYMBOL(__get_user_asm_b);
EXPORT_SYMBOL(__get_user_asm_w);
EXPORT_SYMBOL(__get_user_asm_l);
EXPORT_SYMBOL(__get_user_asm_q);
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_user);
diff --git a/arch/sparc/include/asm/cmt.h b/arch/sparc/include/asm/cmt.h
deleted file mode 100644
index 870db5928577..000000000000
--- a/arch/sparc/include/asm/cmt.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef _SPARC64_CMT_H
-#define _SPARC64_CMT_H
-
-/* cmt.h: Chip Multi-Threading register definitions
- *
- * Copyright (C) 2004 David S. Miller (davem@redhat.com)
- */
-
-/* ASI_CORE_ID - private */
-#define LP_ID 0x0000000000000010UL
-#define LP_ID_MAX 0x00000000003f0000UL
-#define LP_ID_ID 0x000000000000003fUL
-
-/* ASI_INTR_ID - private */
-#define LP_INTR_ID 0x0000000000000000UL
-#define LP_INTR_ID_ID 0x00000000000003ffUL
-
-/* ASI_CESR_ID - private */
-#define CESR_ID 0x0000000000000040UL
-#define CESR_ID_ID 0x00000000000000ffUL
-
-/* ASI_CORE_AVAILABLE - shared */
-#define LP_AVAIL 0x0000000000000000UL
-#define LP_AVAIL_1 0x0000000000000002UL
-#define LP_AVAIL_0 0x0000000000000001UL
-
-/* ASI_CORE_ENABLE_STATUS - shared */
-#define LP_ENAB_STAT 0x0000000000000010UL
-#define LP_ENAB_STAT_1 0x0000000000000002UL
-#define LP_ENAB_STAT_0 0x0000000000000001UL
-
-/* ASI_CORE_ENABLE - shared */
-#define LP_ENAB 0x0000000000000020UL
-#define LP_ENAB_1 0x0000000000000002UL
-#define LP_ENAB_0 0x0000000000000001UL
-
-/* ASI_CORE_RUNNING - shared */
-#define LP_RUNNING_RW 0x0000000000000050UL
-#define LP_RUNNING_W1S 0x0000000000000060UL
-#define LP_RUNNING_W1C 0x0000000000000068UL
-#define LP_RUNNING_1 0x0000000000000002UL
-#define LP_RUNNING_0 0x0000000000000001UL
-
-/* ASI_CORE_RUNNING_STAT - shared */
-#define LP_RUN_STAT 0x0000000000000058UL
-#define LP_RUN_STAT_1 0x0000000000000002UL
-#define LP_RUN_STAT_0 0x0000000000000001UL
-
-/* ASI_XIR_STEERING - shared */
-#define LP_XIR_STEER 0x0000000000000030UL
-#define LP_XIR_STEER_1 0x0000000000000002UL
-#define LP_XIR_STEER_0 0x0000000000000001UL
-
-/* ASI_CMT_ERROR_STEERING - shared */
-#define CMT_ER_STEER 0x0000000000000040UL
-#define CMT_ER_STEER_1 0x0000000000000002UL
-#define CMT_ER_STEER_0 0x0000000000000001UL
-
-#endif /* _SPARC64_CMT_H */
diff --git a/arch/sparc/include/asm/mpmbox.h b/arch/sparc/include/asm/mpmbox.h
deleted file mode 100644
index f8423039b242..000000000000
--- a/arch/sparc/include/asm/mpmbox.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * mpmbox.h: Interface and defines for the OpenProm mailbox
- * facilities for MP machines under Linux.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _SPARC_MPMBOX_H
-#define _SPARC_MPMBOX_H
-
-/* The prom allocates, for each CPU on the machine an unsigned
- * byte in physical ram. You probe the device tree prom nodes
- * for these values. The purpose of this byte is to be able to
- * pass messages from one cpu to another.
- */
-
-/* These are the main message types we have to look for in our
- * Cpu mailboxes, based upon these values we decide what course
- * of action to take.
- */
-
-/* The CPU is executing code in the kernel. */
-#define MAILBOX_ISRUNNING 0xf0
-
-/* Another CPU called romvec->pv_exit(), you should call
- * prom_stopcpu() when you see this in your mailbox.
- */
-#define MAILBOX_EXIT 0xfb
-
-/* Another CPU called romvec->pv_enter(), you should call
- * prom_cpuidle() when this is seen.
- */
-#define MAILBOX_GOSPIN 0xfc
-
-/* Another CPU has hit a breakpoint either into kadb or the prom
- * itself. Just like MAILBOX_GOSPIN, you should call prom_cpuidle()
- * at this point.
- */
-#define MAILBOX_BPT_SPIN 0xfd
-
-/* Oh geese, some other nitwit got a damn watchdog reset. The party's
- * over so go call prom_stopcpu().
- */
-#define MAILBOX_WDOG_STOP 0xfe
-
-#ifndef __ASSEMBLY__
-
-/* Handy macro's to determine a cpu's state. */
-
-/* Is the cpu still in Power On Self Test? */
-#define MBOX_POST_P(letter) ((letter) >= 0x00 && (letter) <= 0x7f)
-
-/* Is the cpu at the 'ok' prompt of the PROM? */
-#define MBOX_PROMPROMPT_P(letter) ((letter) >= 0x80 && (letter) <= 0x8f)
-
-/* Is the cpu spinning in the PROM? */
-#define MBOX_PROMSPIN_P(letter) ((letter) >= 0x90 && (letter) <= 0xef)
-
-/* Sanity check... This is junk mail, throw it out. */
-#define MBOX_BOGON_P(letter) ((letter) >= 0xf1 && (letter) <= 0xfa)
-
-/* Is the cpu actively running an application/kernel-code? */
-#define MBOX_RUNNING_P(letter) ((letter) == MAILBOX_ISRUNNING)
-
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* !(_SPARC_MPMBOX_H) */
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 5cffdc55f075..3e244f31e56b 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -443,7 +443,7 @@ static int __init vio_init(void)
root_vdev = vio_create_one(hp, root, NULL);
err = -ENODEV;
if (!root_vdev) {
- printk(KERN_ERR "VIO: Coult not create root device.\n");
+ printk(KERN_ERR "VIO: Could not create root device.\n");
goto out_release;
}
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 7e1fef36bde6..e9c670d7a7fe 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -91,11 +91,6 @@ extern void smp_nap(void);
/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
extern void _cpu_idle(void);
-/* Switch boot idle thread to a freshly-allocated stack and free old stack. */
-extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
- unsigned long new_sp,
- unsigned long new_ss10);
-
#else /* __ASSEMBLY__ */
/*
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index c3dd275f25e2..9ab078a4605d 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -146,7 +146,7 @@ extern int fixup_exception(struct pt_regs *regs);
#ifdef __tilegx__
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
-#define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
#else
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index 9092ce8aa6b4..f8b74ca83b92 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <asm/byteorder.h>
#include <asm/backtrace.h>
#include <asm/tile-desc.h>
#include <arch/abi.h>
@@ -336,8 +337,12 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
bytes_to_prefetch / sizeof(tile_bundle_bits);
}
- /* Decode the next bundle. */
- bundle.bits = prefetched_bundles[next_bundle++];
+ /*
+ * Decode the next bundle.
+ * TILE always stores instruction bundles in little-endian
+ * mode, even when the chip is running in big-endian mode.
+ */
+ bundle.bits = le64_to_cpu(prefetched_bundles[next_bundle++]);
bundle.num_insns =
parse_insn_tile(bundle.bits, pc, bundle.insns);
num_info_ops = bt_get_info_ops(&bundle, info_operands);
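
The le64_to_cpu() added above only changes behaviour on big-endian TILE configurations, where instruction bundles are still stored little-endian. A minimal userspace sketch of the same idea, assuming nothing beyond standard C and with load_le64() standing in for le64_to_cpu() applied to raw storage:

#include <stdint.h>
#include <stdio.h>

/* Assemble a 64-bit value from bytes stored least-significant-first,
 * regardless of the host CPU's endianness. */
static uint64_t load_le64(const uint8_t *b)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | b[i];
	return v;
}

int main(void)
{
	/* An instruction bundle as it sits in memory: little-endian bytes. */
	uint8_t bundle[8] = { 0xef, 0xbe, 0xad, 0xde, 0x78, 0x56, 0x34, 0x12 };

	/* Prints 0x12345678deadbeef on both little- and big-endian hosts. */
	printf("bundle bits = 0x%016llx\n", (unsigned long long)load_le64(bundle));
	return 0;
}
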
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 133c4b56a99e..c31637baff28 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -68,20 +68,6 @@ STD_ENTRY(KBacktraceIterator_init_current)
jrp lr /* keep backtracer happy */
STD_ENDPROC(KBacktraceIterator_init_current)
-/*
- * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
- * free the old stack (passed in r0) and re-invoke cpu_idle().
- * We update sp and ksp0 simultaneously to avoid backtracer warnings.
- */
-STD_ENTRY(cpu_idle_on_new_stack)
- {
- move sp, r1
- mtspr SPR_SYSTEM_SAVE_K_0, r2
- }
- jal free_thread_info
- j cpu_idle
- STD_ENDPROC(cpu_idle_on_new_stack)
-
/* Loop forever on a nap during SMP boot. */
STD_ENTRY(smp_nap)
nap
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6098ccc59be2..dd87f3420390 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -29,6 +29,7 @@
#include <linux/smp.h>
#include <linux/timex.h>
#include <linux/hugetlb.h>
+#include <linux/start_kernel.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 88e466b159dc..43b39d61b538 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -705,7 +705,6 @@ static void stack_proc(void *arg)
struct task_struct *from = current, *to = arg;
to->thread.saved_task = from;
- rcu_switch_from(from);
switch_to(from, to, from);
}
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 8bbea6aa40d9..efe5acfc79c3 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -94,10 +94,10 @@ bs_die:
.section ".bsdata", "a"
bugger_off_msg:
- .ascii "Direct booting from floppy is no longer supported.\r\n"
- .ascii "Please use a boot loader program instead.\r\n"
+ .ascii "Direct floppy boot is not supported. "
+ .ascii "Use a boot loader program instead.\r\n"
.ascii "\n"
- .ascii "Remove disk and press any key to reboot . . .\r\n"
+ .ascii "Remove disk and press any key to reboot ...\r\n"
.byte 0
#ifdef CONFIG_EFI_STUB
@@ -111,7 +111,7 @@ coff_header:
#else
.word 0x8664 # x86-64
#endif
- .word 2 # nr_sections
+ .word 3 # nr_sections
.long 0 # TimeDateStamp
.long 0 # PointerToSymbolTable
.long 1 # NumberOfSymbols
@@ -158,8 +158,8 @@ extra_header_fields:
#else
.quad 0 # ImageBase
#endif
- .long 0x1000 # SectionAlignment
- .long 0x200 # FileAlignment
+ .long 0x20 # SectionAlignment
+ .long 0x20 # FileAlignment
.word 0 # MajorOperatingSystemVersion
.word 0 # MinorOperatingSystemVersion
.word 0 # MajorImageVersion
@@ -200,8 +200,10 @@ extra_header_fields:
# Section table
section_table:
- .ascii ".text"
- .byte 0
+ #
+ # The offset & size fields are filled in by build.c.
+ #
+ .ascii ".setup"
.byte 0
.byte 0
.long 0
@@ -217,9 +219,8 @@ section_table:
#
# The EFI application loader requires a relocation section
- # because EFI applications must be relocatable. But since
- # we don't need the loader to fixup any relocs for us, we
- # just create an empty (zero-length) .reloc section header.
+ # because EFI applications must be relocatable. The .reloc
+ # offset & size fields are filled in by build.c.
#
.ascii ".reloc"
.byte 0
@@ -233,6 +234,25 @@ section_table:
.word 0 # NumberOfRelocations
.word 0 # NumberOfLineNumbers
.long 0x42100040 # Characteristics (section flags)
+
+ #
+ # The offset & size fields are filled in by build.c.
+ #
+ .ascii ".text"
+ .byte 0
+ .byte 0
+ .byte 0
+ .long 0
+ .long 0x0 # startup_{32,64}
+ .long 0 # Size of initialized data
+ # on disk
+ .long 0x0 # startup_{32,64}
+ .long 0 # PointerToRelocations
+ .long 0 # PointerToLineNumbers
+ .word 0 # NumberOfRelocations
+ .word 0 # NumberOfLineNumbers
+ .long 0x60500020 # Characteristics (section flags)
+
#endif /* CONFIG_EFI_STUB */
# Kernel attributes; used by setup. This is part 1 of the
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 3f61f6e2b46f..4b8e165ee572 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -50,6 +50,8 @@ typedef unsigned int u32;
u8 buf[SETUP_SECT_MAX*512];
int is_big_kernel;
+#define PECOFF_RELOC_RESERVE 0x20
+
/*----------------------------------------------------------------------*/
static const u32 crctab32[] = {
@@ -133,11 +135,103 @@ static void usage(void)
die("Usage: build setup system [> image]");
}
-int main(int argc, char ** argv)
-{
#ifdef CONFIG_EFI_STUB
- unsigned int file_sz, pe_header;
+
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+ unsigned int pe_header;
+ unsigned short num_sections;
+ u8 *section;
+
+ pe_header = get_unaligned_le32(&buf[0x3c]);
+ num_sections = get_unaligned_le16(&buf[pe_header + 6]);
+
+#ifdef CONFIG_X86_32
+ section = &buf[pe_header + 0xa8];
+#else
+ section = &buf[pe_header + 0xb8];
#endif
+
+ while (num_sections > 0) {
+ if (strncmp((char*)section, section_name, 8) == 0) {
+ /* section header size field */
+ put_unaligned_le32(size, section + 0x8);
+
+ /* section header vma field */
+ put_unaligned_le32(offset, section + 0xc);
+
+ /* section header 'size of initialised data' field */
+ put_unaligned_le32(size, section + 0x10);
+
+ /* section header 'file offset' field */
+ put_unaligned_le32(offset, section + 0x14);
+
+ break;
+ }
+ section += 0x28;
+ num_sections--;
+ }
+}
+
+static void update_pecoff_setup_and_reloc(unsigned int size)
+{
+ u32 setup_offset = 0x200;
+ u32 reloc_offset = size - PECOFF_RELOC_RESERVE;
+ u32 setup_size = reloc_offset - setup_offset;
+
+ update_pecoff_section_header(".setup", setup_offset, setup_size);
+ update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
+
+ /*
+ * Modify .reloc section contents with a single entry. The
+ * relocation is applied to offset 10 of the relocation section.
+ */
+ put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
+ put_unaligned_le32(10, &buf[reloc_offset + 4]);
+}
+
+static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
+{
+ unsigned int pe_header;
+ unsigned int text_sz = file_sz - text_start;
+
+ pe_header = get_unaligned_le32(&buf[0x3c]);
+
+ /* Size of image */
+ put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
+
+ /*
+ * Size of code: Subtract the size of the first sector (512 bytes)
+ * which includes the header.
+ */
+ put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
+
+#ifdef CONFIG_X86_32
+ /*
+ * Address of entry point.
+ *
+ * The EFI stub entry point is +16 bytes from the start of
+ * the .text section.
+ */
+ put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]);
+#else
+ /*
+ * Address of entry point. startup_32 is at the beginning and
+ * the 64-bit entry point (startup_64) is always 512 bytes
+ * after. The EFI stub entry point is 16 bytes after that, as
+ * the first instruction allows legacy loaders to jump over
+ * the EFI stub initialisation
+ */
+ put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
+#endif /* CONFIG_X86_32 */
+
+ update_pecoff_section_header(".text", text_start, text_sz);
+}
+
+#endif /* CONFIG_EFI_STUB */
+
+int main(int argc, char ** argv)
+{
unsigned int i, sz, setup_sectors;
int c;
u32 sys_size;
@@ -163,6 +257,12 @@ int main(int argc, char ** argv)
die("Boot block hasn't got boot flag (0xAA55)");
fclose(file);
+#ifdef CONFIG_EFI_STUB
+ /* Reserve 0x20 bytes for .reloc section */
+ memset(buf+c, 0, PECOFF_RELOC_RESERVE);
+ c += PECOFF_RELOC_RESERVE;
+#endif
+
/* Pad unused space with zeros */
setup_sectors = (c + 511) / 512;
if (setup_sectors < SETUP_SECT_MIN)
@@ -170,6 +270,10 @@ int main(int argc, char ** argv)
i = setup_sectors*512;
memset(buf+c, 0, i-c);
+#ifdef CONFIG_EFI_STUB
+ update_pecoff_setup_and_reloc(i);
+#endif
+
/* Set the default root device */
put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
@@ -194,66 +298,8 @@ int main(int argc, char ** argv)
put_unaligned_le32(sys_size, &buf[0x1f4]);
#ifdef CONFIG_EFI_STUB
- file_sz = sz + i + ((sys_size * 16) - sz);
-
- pe_header = get_unaligned_le32(&buf[0x3c]);
-
- /* Size of image */
- put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
- /*
- * Subtract the size of the first section (512 bytes) which
- * includes the header and .reloc section. The remaining size
- * is that of the .text section.
- */
- file_sz -= 512;
-
- /* Size of code */
- put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]);
-
-#ifdef CONFIG_X86_32
- /*
- * Address of entry point.
- *
- * The EFI stub entry point is +16 bytes from the start of
- * the .text section.
- */
- put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
-
- /* .text size */
- put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
-
- /* .text vma */
- put_unaligned_le32(0x200, &buf[pe_header + 0xb4]);
-
- /* .text size of initialised data */
- put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]);
-
- /* .text file offset */
- put_unaligned_le32(0x200, &buf[pe_header + 0xbc]);
-#else
- /*
- * Address of entry point. startup_32 is at the beginning and
- * the 64-bit entry point (startup_64) is always 512 bytes
- * after. The EFI stub entry point is 16 bytes after that, as
- * the first instruction allows legacy loaders to jump over
- * the EFI stub initialisation
- */
- put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
-
- /* .text size */
- put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
-
- /* .text vma */
- put_unaligned_le32(0x200, &buf[pe_header + 0xc4]);
-
- /* .text size of initialised data */
- put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]);
-
- /* .text file offset */
- put_unaligned_le32(0x200, &buf[pe_header + 0xcc]);
-#endif /* CONFIG_X86_32 */
-#endif /* CONFIG_EFI_STUB */
+ update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+#endif
crc = partial_crc32(buf, i, crc);
if (fwrite(buf, 1, i, stdout) != i)
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index be6d9e365a80..3470624d7835 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec)
pxor IN3, STATE4
movaps IN4, IV
#else
- pxor (INP), STATE2
- pxor 0x10(INP), STATE3
pxor IN1, STATE4
movaps IN2, IV
+ movups (INP), IN1
+ pxor IN1, STATE2
+ movups 0x10(INP), IN2
+ pxor IN2, STATE3
#endif
movups STATE1, (OUTP)
movups STATE2, 0x10(OUTP)
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index daeca56211e3..673ac9b63d6b 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -38,7 +38,7 @@
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
int err = 0;
- bool ia32 = is_ia32_task();
+ bool ia32 = test_thread_flag(TIF_IA32);
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 340ee49961a6..f91e80f4f180 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -176,7 +176,7 @@
#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */
#define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */
/* Virtualization flags: Linux defined, word 8 */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index db7c1f2709a2..2da88c0cda14 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -313,8 +313,8 @@ struct kvm_pmu {
u64 counter_bitmask[2];
u64 global_ctrl_mask;
u8 version;
- struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
- struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+ struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
+ struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
struct irq_work irq_work;
u64 reprogram_pmi;
};
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 084ef95274cd..813ed103f45e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -115,8 +115,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
extern unsigned long long native_read_tsc(void);
-extern int native_rdmsr_safe_regs(u32 regs[8]);
-extern int native_wrmsr_safe_regs(u32 regs[8]);
+extern int rdmsr_safe_regs(u32 regs[8]);
+extern int wrmsr_safe_regs(u32 regs[8]);
static __always_inline unsigned long long __native_read_tsc(void)
{
@@ -187,43 +187,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
return err;
}
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
- u32 gprs[8] = { 0 };
- int err;
-
- gprs[1] = msr;
- gprs[7] = 0x9c5a203a;
-
- err = native_rdmsr_safe_regs(gprs);
-
- *p = gprs[0] | ((u64)gprs[2] << 32);
-
- return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
- u32 gprs[8] = { 0 };
-
- gprs[0] = (u32)val;
- gprs[1] = msr;
- gprs[2] = val >> 32;
- gprs[7] = 0x9c5a203a;
-
- return native_wrmsr_safe_regs(gprs);
-}
-
-static inline int rdmsr_safe_regs(u32 regs[8])
-{
- return native_rdmsr_safe_regs(regs);
-}
-
-static inline int wrmsr_safe_regs(u32 regs[8])
-{
- return native_wrmsr_safe_regs(regs);
-}
-
#define rdtscl(low) \
((low) = (u32)__native_read_tsc())
@@ -237,6 +200,8 @@ do { \
(high) = (u32)(_l >> 32); \
} while (0)
+#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+
#define rdtscp(low, high, aux) \
do { \
unsigned long long _val = native_read_tscp(&(aux)); \
@@ -248,8 +213,7 @@ do { \
#endif /* !CONFIG_PARAVIRT */
-
-#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
+#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
(u32)((val) >> 32))
#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
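
The checking_wrmsrl() -> wrmsrl_safe() rename above keeps the same 64-bit-to-high/low split. A trivial sketch of that split, where wrmsr_safe_model() and the MSR number are invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

static int wrmsr_safe_model(uint32_t msr, uint32_t lo, uint32_t hi)
{
	printf("wrmsr %#x <- hi=%#x lo=%#x\n", msr, hi, lo);
	return 0;	/* the real wrmsr_safe() reports an error on a faulting write */
}

#define wrmsrl_safe_model(msr, val) \
	wrmsr_safe_model((msr), (uint32_t)(val), (uint32_t)((val) >> 32))

int main(void)
{
	uint64_t mask = (1ULL << 10) | 0xf;

	return wrmsrl_safe_model(0xc0010000, mask);	/* arbitrary example MSR */
}
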
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 0e3793b821ef..dc580c42851c 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -54,6 +54,20 @@ struct nmiaction {
__register_nmi_handler((t), &fn##_na); \
})
+/*
+ * For special handlers that register/unregister in the
+ * init section only. This should be considered rare.
+ */
+#define register_nmi_handler_initonly(t, fn, fg, n) \
+({ \
+ static struct nmiaction fn##_na __initdata = { \
+ .handler = (fn), \
+ .name = (n), \
+ .flags = (fg), \
+ }; \
+ __register_nmi_handler((t), &fn##_na); \
+})
+
int __register_nmi_handler(unsigned int, struct nmiaction *);
void unregister_nmi_handler(unsigned int, const char *);
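
A hedged usage sketch for the new initonly variant: the handler body, its name, and the __init probe function below are invented for illustration, while register_nmi_handler_initonly() and unregister_nmi_handler() are as declared above.

static int __init example_init_nmi(unsigned int cmd, struct pt_regs *regs)
{
	/* Inspect hardware state during boot-time probing only. */
	return NMI_DONE;	/* not our NMI */
}

static int __init example_probe(void)
{
	register_nmi_handler_initonly(NMI_LOCAL, example_init_nmi, 0, "example");
	/* ... trigger and observe the probe NMI ... */
	unregister_nmi_handler(NMI_LOCAL, "example");
	return 0;
}
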
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 6cbbabf52707..0b47ddb6f00b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -128,21 +128,11 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
-static inline int paravirt_rdmsr_regs(u32 *regs)
-{
- return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
-}
-
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}
-static inline int paravirt_wrmsr_regs(u32 *regs)
-{
- return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
-}
-
/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2) \
do { \
@@ -176,9 +166,6 @@ do { \
_err; \
})
-#define rdmsr_safe_regs(regs) paravirt_rdmsr_regs(regs)
-#define wrmsr_safe_regs(regs) paravirt_wrmsr_regs(regs)
-
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
int err;
@@ -186,32 +173,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
*p = paravirt_read_msr(msr, &err);
return err;
}
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
- u32 gprs[8] = { 0 };
- int err;
-
- gprs[1] = msr;
- gprs[7] = 0x9c5a203a;
-
- err = paravirt_rdmsr_regs(gprs);
-
- *p = gprs[0] | ((u64)gprs[2] << 32);
-
- return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
- u32 gprs[8] = { 0 };
-
- gprs[0] = (u32)val;
- gprs[1] = msr;
- gprs[2] = val >> 32;
- gprs[7] = 0x9c5a203a;
-
- return paravirt_wrmsr_regs(gprs);
-}
static inline u64 paravirt_read_tsc(void)
{
@@ -252,6 +213,8 @@ do { \
high = _l >> 32; \
} while (0)
+#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
+
static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8e8b9a4987ee..8613cbb7ba41 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -153,9 +153,7 @@ struct pv_cpu_ops {
/* MSR, PMC and TSR operations.
err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
u64 (*read_msr)(unsigned int msr, int *err);
- int (*rdmsr_regs)(u32 *regs);
int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
- int (*wrmsr_regs)(u32 *regs);
u64 (*read_tsc)(void);
u64 (*read_pmc)(int counter);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 588f52ea810e..c78f14a0df00 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -5,11 +5,10 @@
* Performance event hw details:
*/
-#define X86_PMC_MAX_GENERIC 32
-#define X86_PMC_MAX_FIXED 3
+#define INTEL_PMC_MAX_GENERIC 32
+#define INTEL_PMC_MAX_FIXED 3
+#define INTEL_PMC_IDX_FIXED 32
-#define X86_PMC_IDX_GENERIC 0
-#define X86_PMC_IDX_FIXED 32
#define X86_PMC_IDX_MAX 64
#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
@@ -48,8 +47,7 @@
(X86_RAW_EVENT_MASK | \
AMD64_EVENTSEL_EVENT)
#define AMD64_NUM_COUNTERS 4
-#define AMD64_NUM_COUNTERS_F15H 6
-#define AMD64_NUM_COUNTERS_MAX AMD64_NUM_COUNTERS_F15H
+#define AMD64_NUM_COUNTERS_CORE 6
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
@@ -121,16 +119,16 @@ struct x86_pmu_capability {
/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
-#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
+#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)
/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
-#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)
+#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)
/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
-#define X86_PMC_IDX_FIXED_REF_CYCLES (X86_PMC_IDX_FIXED + 2)
-#define X86_PMC_MSK_FIXED_REF_CYCLES (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
+#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
+#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
/*
* We model BTS tracing as another fixed-mode PMC.
@@ -139,7 +137,7 @@ struct x86_pmu_capability {
* values are used by actual fixed events and higher values are used
* to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
*/
-#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
+#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16)
/*
* IBS cpuid feature detection
@@ -234,6 +232,7 @@ struct perf_guest_switch_msr {
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
+extern void perf_check_microcode(void);
#else
static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
@@ -247,6 +246,7 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
}
static inline void perf_events_lapic_init(void) { }
+static inline void perf_check_microcode(void) { }
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 43876f16caf1..cb00ccc7d571 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -47,16 +47,26 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
* they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
* operations.
*
- * Without THP if the mmap_sem is hold for reading, the
- * pmd can only transition from null to not null while pmd_read_atomic runs.
- * So there's no need of literally reading it atomically.
+ * Without THP if the mmap_sem is hold for reading, the pmd can only
+ * transition from null to not null while pmd_read_atomic runs. So
+ * we can always return atomic pmd values with this function.
*
* With THP if the mmap_sem is hold for reading, the pmd can become
- * THP or null or point to a pte (and in turn become "stable") at any
- * time under pmd_read_atomic, so it's mandatory to read it atomically
- * with cmpxchg8b.
+ * trans_huge or none or point to a pte (and in turn become "stable")
+ * at any time under pmd_read_atomic. We could read it really
+ * atomically here with a atomic64_read for the THP enabled case (and
+ * it would be a whole lot simpler), but to avoid using cmpxchg8b we
+ * only return an atomic pmdval if the low part of the pmdval is later
+ * found stable (i.e. pointing to a pte). And we're returning a none
+ * pmdval if the low part of the pmd is none. In some cases the high
+ * and low part of the pmdval returned may not be consistent if THP is
+ * enabled (the low part may point to previously mapped hugepage,
+ * while the high part may point to a more recently mapped hugepage),
+ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
+ * of the pmd to be read atomically to decide if the pmd is unstable
+ * or not, with the only exception of when the low part of the pmd is
+ * zero in which case we return a none pmd.
*/
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
pmdval_t ret;
@@ -74,12 +84,6 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
return (pmd_t) { ret };
}
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
-{
- return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
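
The reworked comment above is easier to follow with a small model of the read order it describes. This is a userspace sketch only; the real pmd_read_atomic() also places a read barrier between the two 32-bit loads.

#include <stdint.h>

typedef uint64_t pmdval_t;

struct pae_pmd { volatile uint32_t lo, hi; };

static pmdval_t pmd_read_atomic_model(const struct pae_pmd *p)
{
	pmdval_t ret = p->lo;		/* low half first, read atomically */

	/*
	 * If the low half is none (zero) we must not merge in the high half:
	 * returning a none pmd is the guarantee callers rely on.  If the low
	 * half points to a pte, a possibly stale high half is tolerated,
	 * since only the low half decides whether the pmd is unstable.
	 */
	if (ret)
		ret |= (pmdval_t)p->hi << 32;

	return ret;
}

int main(void)
{
	struct pae_pmd pmd = { .lo = 0x1067, .hi = 0x1 };

	return pmd_read_atomic_model(&pmd) != 0x100001067ULL;
}
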
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 04cd6882308e..e1f3a17034fc 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -33,9 +33,8 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
#define user_addr_max() (current_thread_info()->addr_limit.seg)
-#define __addr_ok(addr) \
- ((unsigned long __force)(addr) < \
- (current_thread_info()->addr_limit.seg))
+#define __addr_ok(addr) \
+ ((unsigned long __force)(addr) < user_addr_max())
/*
* Test whether a block of memory is a valid user space address.
@@ -47,14 +46,14 @@
* This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
*/
-#define __range_not_ok(addr, size) \
+#define __range_not_ok(addr, size, limit) \
({ \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
: "=&r" (flag), "=r" (roksum) \
: "1" (addr), "g" ((long)(size)), \
- "rm" (current_thread_info()->addr_limit.seg)); \
+ "rm" (limit)); \
flag; \
})
@@ -77,7 +76,8 @@
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
+#define access_ok(type, addr, size) \
+ (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
/*
* The exception table consists of pairs of addresses relative to the
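
The extra limit parameter threaded through __range_not_ok() above feeds the same carry-based check as before. In plain C the add/sbb/cmp/sbb sequence amounts to the following sketch, with range_not_ok_model() as a made-up stand-in (nonzero means the range is not ok):

#include <stdint.h>
#include <stdio.h>

static int range_not_ok_model(uintptr_t addr, uintptr_t size, uintptr_t limit)
{
	uintptr_t end = addr + size;

	if (end < addr)		/* wrapped around the top of the address space */
		return 1;
	if (end > limit)	/* past the supplied limit (user_addr_max() or TASK_SIZE) */
		return 1;
	return 0;
}

int main(void)
{
	uintptr_t limit = (uintptr_t)1 << 30;	/* arbitrary example limit */

	printf("%d\n", range_not_ok_model(0x1000, 0x100, limit));	/* 0: ok    */
	printf("%d\n", range_not_ok_model((uintptr_t)-16, 64, limit));	/* 1: wraps */
	return 0;
}
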
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 1e9bed14f7ae..f3971bbcd1de 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -48,7 +48,7 @@ struct arch_uprobe_task {
#endif
};
-extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm);
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index becf47b81735..6149b476d9df 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -149,7 +149,6 @@
/* 4 bits of software ack period */
#define UV2_ACK_MASK 0x7UL
#define UV2_ACK_UNITS_SHFT 3
-#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
/*
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8afb69319815..b2297e58c6ed 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -422,12 +422,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
return 0;
}
- if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+ if (intsrc->source_irq == 0) {
if (acpi_skip_timer_override) {
- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+ printk(PREFIX "BIOS IRQ0 override ignored.\n");
return 0;
}
- if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+
+ if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
+ && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
}
@@ -1334,17 +1336,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
}
/*
- * Force ignoring BIOS IRQ0 pin2 override
+ * Force ignoring BIOS IRQ0 override
*/
static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
{
- /*
- * The ati_ixp4x0_rev() early PCI quirk should have set
- * the acpi_skip_timer_override flag already:
- */
if (!acpi_skip_timer_override) {
- WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
- pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
+ pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
d->ident);
acpi_skip_timer_override = 1;
}
@@ -1438,7 +1435,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
* is enabled. This input is incorrectly designated the
* ISA IRQ 0 via an interrupt source override even though
* it is wired to the output of the master 8259A and INTIN0
- * is not connected at all. Force ignoring BIOS IRQ0 pin2
+ * is not connected at all. Force ignoring BIOS IRQ0
* override in such cases.
*/
{
@@ -1473,6 +1470,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
},
},
+ {
+ .callback = dmi_ignore_irq0_timer_override,
+ .ident = "FUJITSU SIEMENS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+ },
+ },
{}
};
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 6e76c191a835..d5fd66f0d4cd 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -20,7 +20,6 @@
#include <linux/bitops.h>
#include <linux/ioport.h>
#include <linux/suspend.h>
-#include <linux/kmemleak.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/iommu.h>
@@ -95,11 +94,6 @@ static u32 __init allocate_aperture(void)
return 0;
}
memblock_reserve(addr, aper_size);
- /*
- * Kmemleak should not scan this block as it may not be mapped via the
- * kernel direct mapping.
- */
- kmemleak_ignore(phys_to_virt(addr));
printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
aper_size >> 10, addr);
insert_aperture_resource((u32)addr, aper_size);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ac96561d1a99..5f0ff597437c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1195,7 +1195,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
BUG_ON(!cfg->vector);
vector = cfg->vector;
- for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+ for_each_cpu(cpu, cfg->domain)
per_cpu(vector_irq, cpu)[vector] = -1;
cfg->vector = 0;
@@ -1203,7 +1203,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
if (likely(!cfg->move_in_progress))
return;
- for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+ for_each_cpu(cpu, cfg->old_domain) {
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
vector++) {
if (per_cpu(vector_irq, cpu)[vector] != irq)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 6ab6aa2fdfdd..bac4c3804cc7 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -32,7 +32,9 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o
ifdef CONFIG_PERF_EVENTS
obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o
endif
obj-$(CONFIG_X86_MCE) += mcheck/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 146bb6218eec..9d92e19039f0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -19,6 +19,39 @@
#include "cpu.h"
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ u32 gprs[8] = { 0 };
+ int err;
+
+ WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+ gprs[1] = msr;
+ gprs[7] = 0x9c5a203a;
+
+ err = rdmsr_safe_regs(gprs);
+
+ *p = gprs[0] | ((u64)gprs[2] << 32);
+
+ return err;
+}
+
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ u32 gprs[8] = { 0 };
+
+ WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+ gprs[0] = (u32)val;
+ gprs[1] = msr;
+ gprs[2] = val >> 32;
+ gprs[7] = 0x9c5a203a;
+
+ return wrmsr_safe_regs(gprs);
+}
+
#ifdef CONFIG_X86_32
/*
* B step AMD K6 before B 9730xxxx have hardware bugs that can cause
@@ -586,9 +619,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
!cpu_has(c, X86_FEATURE_TOPOEXT)) {
u64 val;
- if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+ if (!rdmsrl_safe(0xc0011005, &val)) {
val |= 1ULL << 54;
- wrmsrl_amd_safe(0xc0011005, val);
+ wrmsrl_safe(0xc0011005, val);
rdmsrl(0xc0011005, val);
if (val & (1ULL << 54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
@@ -679,7 +712,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
if (err == 0) {
mask |= (1 << 10);
- checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+ wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
}
}
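
The rdmsrl_amd_safe() helper moved here reassembles its result from the gprs[] array as a plain 32/32 split. A standalone sketch of that reassembly (the values below are made up; index 1 carries the MSR number and index 7 the 0x9c5a203a key, exactly as in the hunk above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gprs[8] = { 0 };

	gprs[1] = 0xc0011005;	/* MSR number passed to rdmsr_safe_regs() */
	gprs[7] = 0x9c5a203a;	/* access key checked by the K8 hardware  */

	/* Pretend the RDMSR returned these two halves: */
	gprs[0] = 0x89abcdef;	/* low 32 bits  */
	gprs[2] = 0x01234567;	/* high 32 bits */

	printf("MSR value = 0x%016llx\n",
	       (unsigned long long)(gprs[0] | ((uint64_t)gprs[2] << 32)));
	return 0;
}
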
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6b9333b429ba..5bbc082c47ad 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -947,7 +947,7 @@ static void __cpuinit __print_cpu_msr(void)
index_max = msr_range_array[i].max;
for (index = index_min; index < index_max; index++) {
- if (rdmsrl_amd_safe(index, &val))
+ if (rdmsrl_safe(index, &val))
continue;
printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 0a687fd185e6..da27c5d2168a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1274,7 +1274,7 @@ static void mce_timer_fn(unsigned long data)
*/
iv = __this_cpu_read(mce_next_interval);
if (mce_notify_irq())
- iv = max(iv, (unsigned long) HZ/100);
+ iv = max(iv / 2, (unsigned long) HZ/100);
else
iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
__this_cpu_write(mce_next_interval, iv);
@@ -1557,7 +1557,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
static void __mcheck_cpu_init_timer(void)
{
struct timer_list *t = &__get_cpu_var(mce_timer);
- unsigned long iv = __this_cpu_read(mce_next_interval);
+ unsigned long iv = check_interval * HZ;
setup_timer(t, mce_timer_fn, smp_processor_id());
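
The mce_timer_fn() hunk above makes the polling interval adaptive: halve it (but not below HZ/100) whenever work was found, double it (capped at check_interval seconds) otherwise, and start from check_interval rather than a stale per-cpu value. A small model of that policy, with HZ and the 5-minute check_interval assumed for the example and round_jiffies_relative() omitted:

#include <stdio.h>

#define HZ 250					/* assumed tick rate */

static const unsigned long check_interval = 5 * 60;	/* seconds */

static unsigned long next_interval(unsigned long iv, int found_work)
{
	if (found_work)
		return iv / 2 > HZ / 100 ? iv / 2 : HZ / 100;		  /* max() */
	return iv * 2 < check_interval * HZ ? iv * 2 : check_interval * HZ; /* min() */
}

int main(void)
{
	unsigned long iv = check_interval * HZ;		/* new initial value */

	iv = next_interval(iv, 1);
	printf("after finding work: %lu jiffies\n", iv);
	iv = next_interval(iv, 0);
	printf("after an idle pass: %lu jiffies\n", iv);
	return 0;
}
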
diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl
index dfea390e1608..c7b3fe2d72e0 100644
--- a/arch/x86/kernel/cpu/mkcapflags.pl
+++ b/arch/x86/kernel/cpu/mkcapflags.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/perl -w
#
# Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
#
@@ -11,22 +11,35 @@ open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
print OUT "#include <asm/cpufeature.h>\n\n";
print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
+%features = ();
+$err = 0;
+
while (defined($line = <IN>)) {
if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
$macro = $1;
- $feature = $2;
+ $feature = "\L$2";
$tail = $3;
if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
- $feature = $1;
+ $feature = "\L$1";
}
- if ($feature ne '') {
- printf OUT "\t%-32s = \"%s\",\n",
- "[$macro]", "\L$feature";
+ next if ($feature eq '');
+
+ if ($features{$feature}++) {
+ print STDERR "$in: duplicate feature name: $feature\n";
+ $err++;
}
+ printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature;
}
}
print OUT "};\n";
close(IN);
close(OUT);
+
+if ($err) {
+ unlink($out);
+ exit(1);
+}
+
+exit(0);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e049d6da0183..29557aa06dda 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -35,17 +35,6 @@
#include "perf_event.h"
-#if 0
-#undef wrmsrl
-#define wrmsrl(msr, val) \
-do { \
- trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
- (unsigned long)(val)); \
- native_write_msr((msr), (u32)((u64)(val)), \
- (u32)((u64)(val) >> 32)); \
-} while (0)
-#endif
-
struct x86_pmu x86_pmu __read_mostly;
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
@@ -74,7 +63,7 @@ u64 x86_perf_event_update(struct perf_event *event)
int idx = hwc->idx;
s64 delta;
- if (idx == X86_PMC_IDX_FIXED_BTS)
+ if (idx == INTEL_PMC_IDX_FIXED_BTS)
return 0;
/*
@@ -86,7 +75,7 @@ u64 x86_perf_event_update(struct perf_event *event)
*/
again:
prev_raw_count = local64_read(&hwc->prev_count);
- rdmsrl(hwc->event_base, new_raw_count);
+ rdpmcl(hwc->event_base_rdpmc, new_raw_count);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
@@ -189,7 +178,7 @@ static void release_pmc_hardware(void) {}
static bool check_hw_exists(void)
{
- u64 val, val_new = 0;
+ u64 val, val_new = ~0;
int i, reg, ret = 0;
/*
@@ -222,8 +211,9 @@ static bool check_hw_exists(void)
* that don't trap on the MSR access and always return 0s.
*/
val = 0xabcdUL;
- ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
- ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
+ reg = x86_pmu_event_addr(0);
+ ret = wrmsrl_safe(reg, val);
+ ret |= rdmsrl_safe(reg, &val_new);
if (ret || val != val_new)
goto msr_fail;
@@ -240,6 +230,7 @@ bios_fail:
msr_fail:
printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
+ printk(KERN_ERR "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);
return false;
}
@@ -388,7 +379,7 @@ int x86_pmu_hw_config(struct perf_event *event)
int precise = 0;
/* Support for constant skid */
- if (x86_pmu.pebs_active) {
+ if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
precise++;
/* Support for IP fixup */
@@ -637,8 +628,8 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
c = sched->constraints[sched->state.event];
/* Prefer fixed purpose counters */
- if (x86_pmu.num_counters_fixed) {
- idx = X86_PMC_IDX_FIXED;
+ if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
+ idx = INTEL_PMC_IDX_FIXED;
for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
if (!__test_and_set_bit(idx, sched->state.used))
goto done;
@@ -646,7 +637,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
}
/* Grab the first unused counter starting with idx */
idx = sched->state.counter;
- for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
+ for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
if (!__test_and_set_bit(idx, sched->state.used))
goto done;
}
@@ -704,8 +695,8 @@ static bool perf_sched_next_event(struct perf_sched *sched)
/*
* Assign a counter for each event.
*/
-static int perf_assign_events(struct event_constraint **constraints, int n,
- int wmin, int wmax, int *assign)
+int perf_assign_events(struct event_constraint **constraints, int n,
+ int wmin, int wmax, int *assign)
{
struct perf_sched sched;
@@ -824,15 +815,17 @@ static inline void x86_assign_hw_event(struct perf_event *event,
hwc->last_cpu = smp_processor_id();
hwc->last_tag = ++cpuc->tags[i];
- if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
+ if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
hwc->config_base = 0;
hwc->event_base = 0;
- } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
+ } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
- hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
+ hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
+ hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
} else {
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
+ hwc->event_base_rdpmc = hwc->idx;
}
}
@@ -930,7 +923,7 @@ int x86_perf_event_set_period(struct perf_event *event)
s64 period = hwc->sample_period;
int ret = 0, idx = hwc->idx;
- if (idx == X86_PMC_IDX_FIXED_BTS)
+ if (idx == INTEL_PMC_IDX_FIXED_BTS)
return 0;
/*
@@ -1316,7 +1309,6 @@ static struct attribute_group x86_pmu_format_group = {
static int __init init_hw_perf_events(void)
{
struct x86_pmu_quirk *quirk;
- struct event_constraint *c;
int err;
pr_info("Performance Events: ");
@@ -1347,21 +1339,8 @@ static int __init init_hw_perf_events(void)
for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
quirk->func();
- if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
- WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
- x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
- x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
- }
- x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-
- if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
- WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
- x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
- x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
- }
-
- x86_pmu.intel_ctrl |=
- ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+ if (!x86_pmu.intel_ctrl)
+ x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
perf_events_lapic_init();
register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
@@ -1370,22 +1349,6 @@ static int __init init_hw_perf_events(void)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
0, x86_pmu.num_counters, 0);
- if (x86_pmu.event_constraints) {
- /*
- * event on fixed counter2 (REF_CYCLES) only works on this
- * counter, so do not extend mask to generic counters
- */
- for_each_event_constraint(c, x86_pmu.event_constraints) {
- if (c->cmask != X86_RAW_EVENT_MASK
- || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
- continue;
- }
-
- c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
- c->weight += x86_pmu.num_counters;
- }
- }
-
x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
x86_pmu_format_group.attrs = x86_pmu.format_attrs;
@@ -1496,6 +1459,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
if (!cpuc->shared_regs)
goto error;
}
+ cpuc->is_fake = 1;
return cpuc;
error:
free_fake_cpuc(cpuc);
@@ -1619,8 +1583,8 @@ static int x86_pmu_event_idx(struct perf_event *event)
if (!x86_pmu.attr_rdpmc)
return 0;
- if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
- idx -= X86_PMC_IDX_FIXED;
+ if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
+ idx -= INTEL_PMC_IDX_FIXED;
idx |= 1 << 30;
}
@@ -1648,7 +1612,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- unsigned long val = simple_strtoul(buf, NULL, 0);
+ unsigned long val;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
if (!!val != !!x86_pmu.attr_rdpmc) {
x86_pmu.attr_rdpmc = !!val;
@@ -1681,13 +1650,20 @@ static void x86_pmu_flush_branch_stack(void)
x86_pmu.flush_branch_stack();
}
+void perf_check_microcode(void)
+{
+ if (x86_pmu.check_microcode)
+ x86_pmu.check_microcode();
+}
+EXPORT_SYMBOL_GPL(perf_check_microcode);
+
static struct pmu pmu = {
.pmu_enable = x86_pmu_enable,
.pmu_disable = x86_pmu_disable,
- .attr_groups = x86_pmu_attr_groups,
+ .attr_groups = x86_pmu_attr_groups,
- .event_init = x86_pmu_event_init,
+ .event_init = x86_pmu_event_init,
.add = x86_pmu_add,
.del = x86_pmu_del,
@@ -1695,11 +1671,11 @@ static struct pmu pmu = {
.stop = x86_pmu_stop,
.read = x86_pmu_read,
- .start_txn = x86_pmu_start_txn,
- .cancel_txn = x86_pmu_cancel_txn,
- .commit_txn = x86_pmu_commit_txn,
+ .start_txn = x86_pmu_start_txn,
+ .cancel_txn = x86_pmu_cancel_txn,
+ .commit_txn = x86_pmu_commit_txn,
- .event_idx = x86_pmu_event_idx,
+ .event_idx = x86_pmu_event_idx,
.flush_branch_stack = x86_pmu_flush_branch_stack,
};
@@ -1756,6 +1732,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
}
+static inline int
+valid_user_frame(const void __user *fp, unsigned long size)
+{
+ return (__range_not_ok(fp, size, TASK_SIZE) == 0);
+}
+
#ifdef CONFIG_COMPAT
#include <asm/compat.h>
@@ -1780,7 +1762,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
if (bytes != sizeof(frame))
break;
- if (fp < compat_ptr(regs->sp))
+ if (!valid_user_frame(fp, sizeof(frame)))
break;
perf_callchain_store(entry, frame.return_address);
@@ -1826,7 +1808,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
if (bytes != sizeof(frame))
break;
- if ((unsigned long)fp < regs->sp)
+ if (!valid_user_frame(fp, sizeof(frame)))
break;
perf_callchain_store(entry, frame.return_address);
@@ -1856,7 +1838,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
- if (user_mode(regs))
+ if (!kernel_ip(regs->ip))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
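
The new event_base_rdpmc field and the rdpmcl() read path above rely on the RDPMC index encoding visible in both x86_assign_hw_event() and x86_pmu_event_idx(): general-purpose counters use their index directly, fixed-function counters set bit 30. A small sketch of that mapping, with INTEL_PMC_IDX_FIXED taken from the perf_event.h hunk earlier:

#include <stdio.h>

#define INTEL_PMC_IDX_FIXED	32

/* Map a perf hw counter index to the index the RDPMC instruction expects. */
static int rdpmc_index(int hw_idx)
{
	if (hw_idx >= INTEL_PMC_IDX_FIXED)
		return (hw_idx - INTEL_PMC_IDX_FIXED) | (1 << 30);
	return hw_idx;
}

int main(void)
{
	printf("general counter 0 -> rdpmc index %#x\n", rdpmc_index(0));
	printf("fixed counter 1   -> rdpmc index %#x\n",
	       rdpmc_index(INTEL_PMC_IDX_FIXED + 1));
	return 0;
}
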
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 6638aaf54493..a15df4be151f 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -14,6 +14,18 @@
#include <linux/perf_event.h>
+#if 0
+#undef wrmsrl
+#define wrmsrl(msr, val) \
+do { \
+ unsigned int _msr = (msr); \
+ u64 _val = (val); \
+ trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr), \
+ (unsigned long long)(_val)); \
+ native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32)); \
+} while (0)
+#endif
+
/*
* | NHM/WSM | SNB |
* register -------------------------------
@@ -57,7 +69,7 @@ struct amd_nb {
};
/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS 4
+#define MAX_PEBS_EVENTS 8
/*
* A debug store configuration.
@@ -117,6 +129,7 @@ struct cpu_hw_events {
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
unsigned int group_flag;
+ int is_fake;
/*
* Intel DebugStore bits
@@ -348,6 +361,8 @@ struct x86_pmu {
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
+
+ void (*check_microcode)(void);
void (*flush_branch_stack)(void);
/*
@@ -359,11 +374,16 @@ struct x86_pmu {
/*
* Intel DebugStore bits
*/
- int bts, pebs;
- int bts_active, pebs_active;
+ int bts :1,
+ bts_active :1,
+ pebs :1,
+ pebs_active :1,
+ pebs_broken :1;
int pebs_record_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
+ void (*pebs_aliases)(struct perf_event *event);
+ int max_pebs_events;
/*
* Intel LBR
@@ -466,6 +486,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
void x86_pmu_enable_all(int added);
+int perf_assign_events(struct event_constraint **constraints, int n,
+ int wmin, int wmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
void x86_pmu_stop(struct perf_event *event, int flags);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 11a4eb9131d5..4528ae7b6ec4 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -366,7 +366,7 @@ static void amd_pmu_cpu_starting(int cpu)
cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
- if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
+ if (boot_cpu_data.x86_max_cores < 2)
return;
nb_id = amd_get_nb_id(cpu);
@@ -422,35 +422,6 @@ static struct attribute *amd_format_attr[] = {
NULL,
};
-static __initconst const struct x86_pmu amd_pmu = {
- .name = "AMD",
- .handle_irq = x86_pmu_handle_irq,
- .disable_all = x86_pmu_disable_all,
- .enable_all = x86_pmu_enable_all,
- .enable = x86_pmu_enable_event,
- .disable = x86_pmu_disable_event,
- .hw_config = amd_pmu_hw_config,
- .schedule_events = x86_schedule_events,
- .eventsel = MSR_K7_EVNTSEL0,
- .perfctr = MSR_K7_PERFCTR0,
- .event_map = amd_pmu_event_map,
- .max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_counters = AMD64_NUM_COUNTERS,
- .cntval_bits = 48,
- .cntval_mask = (1ULL << 48) - 1,
- .apic = 1,
- /* use highest bit to detect overflow */
- .max_period = (1ULL << 47) - 1,
- .get_event_constraints = amd_get_event_constraints,
- .put_event_constraints = amd_put_event_constraints,
-
- .format_attrs = amd_format_attr,
-
- .cpu_prepare = amd_pmu_cpu_prepare,
- .cpu_starting = amd_pmu_cpu_starting,
- .cpu_dead = amd_pmu_cpu_dead,
-};
-
/* AMD Family 15h */
#define AMD_EVENT_TYPE_MASK 0x000000F0ULL
@@ -597,8 +568,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
}
}
-static __initconst const struct x86_pmu amd_pmu_f15h = {
- .name = "AMD Family 15h",
+static __initconst const struct x86_pmu amd_pmu = {
+ .name = "AMD",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
@@ -606,50 +577,68 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
.disable = x86_pmu_disable_event,
.hw_config = amd_pmu_hw_config,
.schedule_events = x86_schedule_events,
- .eventsel = MSR_F15H_PERF_CTL,
- .perfctr = MSR_F15H_PERF_CTR,
+ .eventsel = MSR_K7_EVNTSEL0,
+ .perfctr = MSR_K7_PERFCTR0,
.event_map = amd_pmu_event_map,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_counters = AMD64_NUM_COUNTERS_F15H,
+ .num_counters = AMD64_NUM_COUNTERS,
.cntval_bits = 48,
.cntval_mask = (1ULL << 48) - 1,
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
- .get_event_constraints = amd_get_event_constraints_f15h,
- /* nortbridge counters not yet implemented: */
-#if 0
+ .get_event_constraints = amd_get_event_constraints,
.put_event_constraints = amd_put_event_constraints,
+ .format_attrs = amd_format_attr,
+
.cpu_prepare = amd_pmu_cpu_prepare,
- .cpu_dead = amd_pmu_cpu_dead,
-#endif
.cpu_starting = amd_pmu_cpu_starting,
- .format_attrs = amd_format_attr,
+ .cpu_dead = amd_pmu_cpu_dead,
};
+static int setup_event_constraints(void)
+{
+ if (boot_cpu_data.x86 >= 0x15)
+ x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
+ return 0;
+}
+
+static int setup_perfctr_core(void)
+{
+ if (!cpu_has_perfctr_core) {
+ WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h,
+ KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!");
+ return -ENODEV;
+ }
+
+ WARN(x86_pmu.get_event_constraints == amd_get_event_constraints,
+ KERN_ERR "hw perf events core counters need constraints handler!");
+
+ /*
+ * If core performance counter extensions exist, we must use
+ * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
+ * x86_pmu_addr_offset().
+ */
+ x86_pmu.eventsel = MSR_F15H_PERF_CTL;
+ x86_pmu.perfctr = MSR_F15H_PERF_CTR;
+ x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
+
+ printk(KERN_INFO "perf: AMD core performance counters detected\n");
+
+ return 0;
+}
+
__init int amd_pmu_init(void)
{
/* Performance-monitoring supported from K7 and later: */
if (boot_cpu_data.x86 < 6)
return -ENODEV;
- /*
- * If core performance counter extensions exists, it must be
- * family 15h, otherwise fail. See x86_pmu_addr_offset().
- */
- switch (boot_cpu_data.x86) {
- case 0x15:
- if (!cpu_has_perfctr_core)
- return -ENODEV;
- x86_pmu = amd_pmu_f15h;
- break;
- default:
- if (cpu_has_perfctr_core)
- return -ENODEV;
- x86_pmu = amd_pmu;
- break;
- }
+ x86_pmu = amd_pmu;
+
+ setup_event_constraints();
+ setup_perfctr_core();
/* Events are common for all AMDs */
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 166546ec6aef..1f4c8add6759 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -21,14 +21,14 @@
*/
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
- [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
- [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
- [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
+ [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
};
static struct event_constraint intel_core_event_constraints[] __read_mostly =
@@ -747,7 +747,7 @@ static void intel_pmu_disable_all(void)
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
- if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+ if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();
intel_pmu_pebs_disable_all();
@@ -763,9 +763,9 @@ static void intel_pmu_enable_all(int added)
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
- if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+ if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
struct perf_event *event =
- cpuc->events[X86_PMC_IDX_FIXED_BTS];
+ cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
if (WARN_ON_ONCE(!event))
return;
@@ -871,7 +871,7 @@ static inline void intel_pmu_ack_status(u64 ack)
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
- int idx = hwc->idx - X86_PMC_IDX_FIXED;
+ int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
u64 ctrl_val, mask;
mask = 0xfULL << (idx * 4);
@@ -886,7 +886,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+ if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
intel_pmu_drain_bts_buffer();
return;
@@ -915,7 +915,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
- int idx = hwc->idx - X86_PMC_IDX_FIXED;
+ int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
/*
@@ -949,7 +949,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+ if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
if (!__this_cpu_read(cpu_hw_events.enabled))
return;
@@ -1003,11 +1003,11 @@ static void intel_pmu_reset(void)
printk("clearing PMU state on CPU#%d\n", smp_processor_id());
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
- checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
+ wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
+ wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
}
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
- checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+ wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
if (ds)
ds->bts_index = ds->bts_buffer_base;
@@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event)
return NULL;
}
-static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
+static int intel_alt_er(int idx)
{
if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
- return false;
+ return idx;
- if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
- event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
- event->hw.config |= 0x01bb;
- event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
- event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
- } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
+ if (idx == EXTRA_REG_RSP_0)
+ return EXTRA_REG_RSP_1;
+
+ if (idx == EXTRA_REG_RSP_1)
+ return EXTRA_REG_RSP_0;
+
+ return idx;
+}
+
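+/*
+ * Make the event's raw encoding and its extra_reg MSR consistent with the
+ * (possibly alternate) extra register index it ended up being assigned.
+ */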
+static void intel_fixup_er(struct perf_event *event, int idx)
+{
+ event->hw.extra_reg.idx = idx;
+
+ if (idx == EXTRA_REG_RSP_0) {
event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
event->hw.config |= 0x01b7;
- event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
+ } else if (idx == EXTRA_REG_RSP_1) {
+ event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
+ event->hw.config |= 0x01bb;
+ event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
}
-
- if (event->hw.extra_reg.idx == orig_idx)
- return false;
-
- return true;
}
/*
@@ -1157,14 +1163,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
struct event_constraint *c = &emptyconstraint;
struct er_account *era;
unsigned long flags;
- int orig_idx = reg->idx;
+ int idx = reg->idx;
- /* already allocated shared msr */
- if (reg->alloc)
+ /*
+ * reg->alloc can be set due to existing state, so for fake cpuc we
+ * need to ignore this, otherwise we might fail to allocate proper fake
+ * state for this extra reg constraint. Also see the comment below.
+ */
+ if (reg->alloc && !cpuc->is_fake)
return NULL; /* call x86_get_event_constraint() */
again:
- era = &cpuc->shared_regs->regs[reg->idx];
+ era = &cpuc->shared_regs->regs[idx];
/*
* we use spin_lock_irqsave() to avoid lockdep issues when
* passing a fake cpuc
@@ -1173,6 +1183,29 @@ again:
if (!atomic_read(&era->ref) || era->config == reg->config) {
+ /*
+ * If it's a fake cpuc -- as per validate_{group,event}() -- we
+ * shouldn't touch the event state, and we can avoid doing so
+ * since both will only call get_event_constraints() once
+ * on each event; this avoids the need for reg->alloc.
+ *
+ * Not doing the ER fixup will only result in era->reg being
+ * wrong, but since we won't actually try and program hardware
+ * this isn't a problem either.
+ */
+ if (!cpuc->is_fake) {
+ if (idx != reg->idx)
+ intel_fixup_er(event, idx);
+
+ /*
+ * x86_schedule_events() can call get_event_constraints()
+ * multiple times on events in the case of incremental
+ * scheduling. reg->alloc ensures we only do the ER
+ * allocation once.
+ */
+ reg->alloc = 1;
+ }
+
/* lock in msr value */
era->config = reg->config;
era->reg = reg->reg;
@@ -1180,17 +1213,17 @@ again:
/* one more user */
atomic_inc(&era->ref);
- /* no need to reallocate during incremental event scheduling */
- reg->alloc = 1;
-
/*
* need to call x86_get_event_constraint()
* to check if associated event has constraints
*/
c = NULL;
- } else if (intel_try_alt_er(event, orig_idx)) {
- raw_spin_unlock_irqrestore(&era->lock, flags);
- goto again;
+ } else {
+ idx = intel_alt_er(idx);
+ if (idx != reg->idx) {
+ raw_spin_unlock_irqrestore(&era->lock, flags);
+ goto again;
+ }
}
raw_spin_unlock_irqrestore(&era->lock, flags);
@@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
struct er_account *era;
/*
- * only put constraint if extra reg was actually
- * allocated. Also takes care of event which do
- * not use an extra shared reg
+ * Only put the constraint if the extra reg was actually allocated. Also
+ * takes care of events which do not use an extra shared reg.
+ *
+ * Also, if this is a fake cpuc we shouldn't touch any event state
+ * (reg->alloc) and we don't care about leaving inconsistent cpuc state
+ * either since it'll be thrown out.
*/
- if (!reg->alloc)
+ if (!reg->alloc || cpuc->is_fake)
return;
era = &cpuc->shared_regs->regs[reg->idx];
@@ -1300,15 +1336,9 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
intel_put_shared_regs_event_constraints(cpuc, event);
}
-static int intel_pmu_hw_config(struct perf_event *event)
+static void intel_pebs_aliases_core2(struct perf_event *event)
{
- int ret = x86_pmu_hw_config(event);
-
- if (ret)
- return ret;
-
- if (event->attr.precise_ip &&
- (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+ if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
/*
* Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
* (0x003c) so that we can use it with PEBS.
@@ -1329,10 +1359,48 @@ static int intel_pmu_hw_config(struct perf_event *event)
*/
u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
+ alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
+ event->hw.config = alt_config;
+ }
+}
+
+static void intel_pebs_aliases_snb(struct perf_event *event)
+{
+ if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+ /*
+ * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
+ * (0x003c) so that we can use it with PEBS.
+ *
+ * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
+ * PEBS capable. However we can use UOPS_RETIRED.ALL
+ * (0x01c2), which is a PEBS capable event, to get the same
+ * count.
+ *
+ * UOPS_RETIRED.ALL counts the number of cycles that retire
+ * CNTMASK micro-ops. By setting CNTMASK to a value (16)
+ * larger than the maximum number of micro-ops that can be
+ * retired per cycle (4) and then inverting the condition, we
+ * count all cycles that retire 16 or fewer micro-ops, which
+ * is every cycle.
+ *
+ * Thereby we gain a PEBS capable cycle counter.
+ */
+ u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
event->hw.config = alt_config;
}
+}
+
+static int intel_pmu_hw_config(struct perf_event *event)
+{
+ int ret = x86_pmu_hw_config(event);
+
+ if (ret)
+ return ret;
+
+ if (event->attr.precise_ip && x86_pmu.pebs_aliases)
+ x86_pmu.pebs_aliases(event);
if (intel_pmu_needs_lbr_smpl(event)) {
ret = intel_pmu_setup_lbr_filter(event);
@@ -1607,6 +1675,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.max_period = (1ULL << 31) - 1,
.get_event_constraints = intel_get_event_constraints,
.put_event_constraints = intel_put_event_constraints,
+ .pebs_aliases = intel_pebs_aliases_core2,
.format_attrs = intel_arch3_formats_attr,
@@ -1643,11 +1712,56 @@ static __init void intel_clovertown_quirk(void)
x86_pmu.pebs_constraints = NULL;
}
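+/*
+ * PEBS on SandyBridge is only reliable with sufficiently new microcode:
+ * 'rev' below is the first revision known to be good for each
+ * model/stepping, and older (or unknown) microcode is treated as broken.
+ */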
+static int intel_snb_pebs_broken(int cpu)
+{
+ u32 rev = UINT_MAX; /* default to broken for unknown models */
+
+ switch (cpu_data(cpu).x86_model) {
+ case 42: /* SNB */
+ rev = 0x28;
+ break;
+
+ case 45: /* SNB-EP */
+ switch (cpu_data(cpu).x86_mask) {
+ case 6: rev = 0x618; break;
+ case 7: rev = 0x70c; break;
+ }
+ }
+
+ return (cpu_data(cpu).microcode < rev);
+}
+
+static void intel_snb_check_microcode(void)
+{
+ int pebs_broken = 0;
+ int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if ((pebs_broken = intel_snb_pebs_broken(cpu)))
+ break;
+ }
+ put_online_cpus();
+
+ if (pebs_broken == x86_pmu.pebs_broken)
+ return;
+
+ /*
+ * Serialized by the microcode lock.
+ */
+ if (x86_pmu.pebs_broken) {
+ pr_info("PEBS enabled due to microcode update\n");
+ x86_pmu.pebs_broken = 0;
+ } else {
+ pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
+ x86_pmu.pebs_broken = 1;
+ }
+}
+
static __init void intel_sandybridge_quirk(void)
{
- printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
- x86_pmu.pebs = 0;
- x86_pmu.pebs_constraints = NULL;
+ x86_pmu.check_microcode = intel_snb_check_microcode;
+ intel_snb_check_microcode();
}
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
@@ -1696,6 +1810,7 @@ __init int intel_pmu_init(void)
union cpuid10_edx edx;
union cpuid10_eax eax;
union cpuid10_ebx ebx;
+ struct event_constraint *c;
unsigned int unused;
int version;
@@ -1731,6 +1846,8 @@ __init int intel_pmu_init(void)
x86_pmu.events_maskl = ebx.full;
x86_pmu.events_mask_len = eax.split.mask_length;
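+ /* never track more PEBS record slots than there are generic counters */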
+ x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events:
@@ -1840,8 +1957,9 @@ __init int intel_pmu_init(void)
break;
case 42: /* SandyBridge */
- x86_add_quirk(intel_sandybridge_quirk);
case 45: /* SandyBridge, "Romely-EP" */
+ x86_add_quirk(intel_sandybridge_quirk);
+ case 58: /* IvyBridge */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -1849,6 +1967,7 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+ x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
@@ -1880,5 +1999,37 @@ __init int intel_pmu_init(void)
}
}
+ if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
+ WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+ x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+ x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
+ }
+ x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+
+ if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+ WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+ x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+ x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
+ }
+
+ x86_pmu.intel_ctrl |=
+ ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+
+ if (x86_pmu.event_constraints) {
+ /*
+ * event on fixed counter2 (REF_CYCLES) only works on this
+ * counter, so do not extend mask to generic counters
+ */
+ for_each_event_constraint(c, x86_pmu.event_constraints) {
+ if (c->cmask != X86_RAW_EVENT_MASK
+ || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+ continue;
+ }
+
+ c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+ c->weight += x86_pmu.num_counters;
+ }
+ }
+
return 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 5a3edc27f6e5..629ae0b7ad90 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -248,7 +248,7 @@ void reserve_ds_buffers(void)
*/
struct event_constraint bts_constraint =
- EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
+ EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
void intel_pmu_enable_bts(u64 config)
{
@@ -295,7 +295,7 @@ int intel_pmu_drain_bts_buffer(void)
u64 to;
u64 flags;
};
- struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
+ struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
struct bts_record *at, *top;
struct perf_output_handle handle;
struct perf_event_header header;
@@ -400,14 +400,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
- INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
- INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
- INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
- INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
- INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
- INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
- INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
- INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
@@ -627,7 +620,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
* Should not happen, we program the threshold at 1 and do not
* set a reset value.
*/
- WARN_ON_ONCE(n > 1);
+ WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
at += n - 1;
__intel_pmu_pebs_event(event, iregs, at);
@@ -658,10 +651,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
* Should not happen, we program the threshold at 1 and do not
* set a reset value.
*/
- WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+ WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
for ( ; at < top; at++) {
- for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+ for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
event = cpuc->events[bit];
if (!test_bit(bit, cpuc->active_mask))
continue;
@@ -677,7 +670,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
break;
}
- if (!event || bit >= MAX_PEBS_EVENTS)
+ if (!event || bit >= x86_pmu.max_pebs_events)
continue;
__intel_pmu_pebs_event(event, iregs, at);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
new file mode 100644
index 000000000000..19faffc60886
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -0,0 +1,1850 @@
+#include "perf_event_intel_uncore.h"
+
+static struct intel_uncore_type *empty_uncore[] = { NULL, };
+static struct intel_uncore_type **msr_uncores = empty_uncore;
+static struct intel_uncore_type **pci_uncores = empty_uncore;
+/* pci bus to socket mapping */
+static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
+
+static DEFINE_RAW_SPINLOCK(uncore_box_lock);
+
+/* mask of cpus that collect uncore events */
+static cpumask_t uncore_cpu_mask;
+
+/* constraint for the fixed counter */
+static struct event_constraint constraint_fixed =
+ EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
+static struct event_constraint constraint_empty =
+ EVENT_CONSTRAINT(0, 0, 0);
+
+DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
+DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
+DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
+DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
+DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
+DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
+DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
+DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand0, filter_brand0, "config1:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand1, filter_brand1, "config1:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand2, filter_brand2, "config1:16-23");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand3, filter_brand3, "config1:24-31");
+
+/* Sandy Bridge-EP uncore support */
+static struct intel_uncore_type snbep_uncore_cbox;
+static struct intel_uncore_type snbep_uncore_pcu;
+
+static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int box_ctl = uncore_pci_box_ctl(box);
+ u32 config;
+
+ pci_read_config_dword(pdev, box_ctl, &config);
+ config |= SNBEP_PMON_BOX_CTL_FRZ;
+ pci_write_config_dword(pdev, box_ctl, config);
+}
+
+static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int box_ctl = uncore_pci_box_ctl(box);
+ u32 config;
+
+ pci_read_config_dword(pdev, box_ctl, &config);
+ config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+ pci_write_config_dword(pdev, box_ctl, config);
+}
+
+static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+
+ pci_write_config_dword(pdev, hwc->config_base, hwc->config |
+ SNBEP_PMON_CTL_EN);
+}
+
+static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+
+ pci_write_config_dword(pdev, hwc->config_base, hwc->config);
+}
+
+static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count;
+
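+ /*
+ * The counter is wider than 32 bits, so read it as two consecutive
+ * 32-bit config-space words.
+ */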
+ pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
+ pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
+ return count;
+}
+
+static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL,
+ SNBEP_PMON_BOX_CTL_INT);
+}
+
+static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+ u64 config;
+ unsigned msr;
+
+ msr = uncore_msr_box_ctl(box);
+ if (msr) {
+ rdmsrl(msr, config);
+ config |= SNBEP_PMON_BOX_CTL_FRZ;
+ wrmsrl(msr, config);
+ return;
+ }
+}
+
+static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ u64 config;
+ unsigned msr;
+
+ msr = uncore_msr_box_ctl(box);
+ if (msr) {
+ rdmsrl(msr, config);
+ config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+ wrmsrl(msr, config);
+ return;
+ }
+}
+
+static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+ if (reg1->idx != EXTRA_REG_NONE)
+ wrmsrl(reg1->reg, reg1->config);
+
+ wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ wrmsrl(hwc->config_base, hwc->config);
+}
+
+static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count;
+
+ rdmsrl(hwc->event_base, count);
+ return count;
+}
+
+static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+ if (msr)
+ wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
+}
+
+static struct event_constraint *
+snbep_uncore_get_constraint(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct intel_uncore_extra_reg *er;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ unsigned long flags;
+ bool ok = false;
+
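+ /*
+ * phys_id < 0 indicates a temporary box used by uncore_validate_group();
+ * never record an allocation (reg1->alloc) against such a box.
+ */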
+ if (reg1->idx == EXTRA_REG_NONE || (box->phys_id >= 0 && reg1->alloc))
+ return NULL;
+
+ er = &box->shared_regs[reg1->idx];
+ raw_spin_lock_irqsave(&er->lock, flags);
+ if (!atomic_read(&er->ref) || er->config1 == reg1->config) {
+ atomic_inc(&er->ref);
+ er->config1 = reg1->config;
+ ok = true;
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ if (ok) {
+ if (box->phys_id >= 0)
+ reg1->alloc = 1;
+ return NULL;
+ }
+ return &constraint_empty;
+}
+
+static void snbep_uncore_put_constraint(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct intel_uncore_extra_reg *er;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+
+ if (box->phys_id < 0 || !reg1->alloc)
+ return;
+
+ er = &box->shared_regs[reg1->idx];
+ atomic_dec(&er->ref);
+ reg1->alloc = 0;
+}
+
+static int snbep_uncore_hw_config(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
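+ /*
+ * Only the Cbox and PCU boxes carry an extra per-box filter register,
+ * programmed from attr.config1.
+ */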
+ if (box->pmu->type == &snbep_uncore_cbox) {
+ reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
+ SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
+ reg1->config = event->attr.config1 &
+ SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
+ } else if (box->pmu->type == &snbep_uncore_pcu) {
+ reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
+ reg1->config = event->attr.config1 &
+ SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
+ } else {
+ return 0;
+ }
+ reg1->idx = 0;
+ return 0;
+}
+
+static struct attribute *snbep_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ NULL,
+};
+
+static struct attribute *snbep_uncore_ubox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh5.attr,
+ NULL,
+};
+
+static struct attribute *snbep_uncore_cbox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_tid_en.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_filter_tid.attr,
+ &format_attr_filter_nid.attr,
+ &format_attr_filter_state.attr,
+ &format_attr_filter_opc.attr,
+ NULL,
+};
+
+static struct attribute *snbep_uncore_pcu_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_occ_sel.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh5.attr,
+ &format_attr_occ_invert.attr,
+ &format_attr_occ_edge.attr,
+ &format_attr_filter_brand0.attr,
+ &format_attr_filter_brand1.attr,
+ &format_attr_filter_brand2.attr,
+ &format_attr_filter_brand3.attr,
+ NULL,
+};
+
+static struct uncore_event_desc snbep_uncore_imc_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
+ { /* end: all zeroes */ },
+};
+
+static struct uncore_event_desc snbep_uncore_qpi_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
+ INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
+ INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
+ INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
+ { /* end: all zeroes */ },
+};
+
+static struct attribute_group snbep_uncore_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_ubox_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_ubox_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_cbox_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_cbox_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_pcu_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_pcu_formats_attr,
+};
+
+static struct intel_uncore_ops snbep_uncore_msr_ops = {
+ .init_box = snbep_uncore_msr_init_box,
+ .disable_box = snbep_uncore_msr_disable_box,
+ .enable_box = snbep_uncore_msr_enable_box,
+ .disable_event = snbep_uncore_msr_disable_event,
+ .enable_event = snbep_uncore_msr_enable_event,
+ .read_counter = snbep_uncore_msr_read_counter,
+ .get_constraint = snbep_uncore_get_constraint,
+ .put_constraint = snbep_uncore_put_constraint,
+ .hw_config = snbep_uncore_hw_config,
+};
+
+static struct intel_uncore_ops snbep_uncore_pci_ops = {
+ .init_box = snbep_uncore_pci_init_box,
+ .disable_box = snbep_uncore_pci_disable_box,
+ .enable_box = snbep_uncore_pci_enable_box,
+ .disable_event = snbep_uncore_pci_disable_event,
+ .enable_event = snbep_uncore_pci_enable_event,
+ .read_counter = snbep_uncore_pci_read_counter,
+};
+
+static struct event_constraint snbep_uncore_cbox_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
+ UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
+ UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
+ UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
+ EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
+ UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+ EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snbep_uncore_ubox = {
+ .name = "ubox",
+ .num_counters = 2,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .fixed_ctr_bits = 48,
+ .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
+ .event_ctl = SNBEP_U_MSR_PMON_CTL0,
+ .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
+ .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
+ .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
+ .ops = &snbep_uncore_msr_ops,
+ .format_group = &snbep_uncore_ubox_format_group,
+};
+
+static struct intel_uncore_type snbep_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 4,
+ .num_boxes = 8,
+ .perf_ctr_bits = 44,
+ .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
+ .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
+ .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
+ .msr_offset = SNBEP_CBO_MSR_OFFSET,
+ .num_shared_regs = 1,
+ .constraints = snbep_uncore_cbox_constraints,
+ .ops = &snbep_uncore_msr_ops,
+ .format_group = &snbep_uncore_cbox_format_group,
+};
+
+static struct intel_uncore_type snbep_uncore_pcu = {
+ .name = "pcu",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
+ .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
+ .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
+ .num_shared_regs = 1,
+ .ops = &snbep_uncore_msr_ops,
+ .format_group = &snbep_uncore_pcu_format_group,
+};
+
+static struct intel_uncore_type *snbep_msr_uncores[] = {
+ &snbep_uncore_ubox,
+ &snbep_uncore_cbox,
+ &snbep_uncore_pcu,
+ NULL,
+};
+
+#define SNBEP_UNCORE_PCI_COMMON_INIT() \
+ .perf_ctr = SNBEP_PCI_PMON_CTR0, \
+ .event_ctl = SNBEP_PCI_PMON_CTL0, \
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
+ .ops = &snbep_uncore_pci_ops, \
+ .format_group = &snbep_uncore_format_group
+
+static struct intel_uncore_type snbep_uncore_ha = {
+ .name = "ha",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_imc = {
+ .name = "imc",
+ .num_counters = 4,
+ .num_boxes = 4,
+ .perf_ctr_bits = 48,
+ .fixed_ctr_bits = 48,
+ .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
+ .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
+ .event_descs = snbep_uncore_imc_events,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_qpi = {
+ .name = "qpi",
+ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .event_descs = snbep_uncore_qpi_events,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+
+static struct intel_uncore_type snbep_uncore_r2pcie = {
+ .name = "r2pcie",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .constraints = snbep_uncore_r2pcie_constraints,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_r3qpi = {
+ .name = "r3qpi",
+ .num_counters = 3,
+ .num_boxes = 2,
+ .perf_ctr_bits = 44,
+ .constraints = snbep_uncore_r3qpi_constraints,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type *snbep_pci_uncores[] = {
+ &snbep_uncore_ha,
+ &snbep_uncore_imc,
+ &snbep_uncore_qpi,
+ &snbep_uncore_r2pcie,
+ &snbep_uncore_r3qpi,
+ NULL,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
+ { /* Home Agent */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
+ .driver_data = (unsigned long)&snbep_uncore_ha,
+ },
+ { /* MC Channel 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
+ .driver_data = (unsigned long)&snbep_uncore_imc,
+ },
+ { /* MC Channel 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
+ .driver_data = (unsigned long)&snbep_uncore_imc,
+ },
+ { /* MC Channel 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
+ .driver_data = (unsigned long)&snbep_uncore_imc,
+ },
+ { /* MC Channel 3 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
+ .driver_data = (unsigned long)&snbep_uncore_imc,
+ },
+ { /* QPI Port 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
+ .driver_data = (unsigned long)&snbep_uncore_qpi,
+ },
+ { /* QPI Port 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
+ .driver_data = (unsigned long)&snbep_uncore_qpi,
+ },
+ { /* P2PCIe */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
+ .driver_data = (unsigned long)&snbep_uncore_r2pcie,
+ },
+ { /* R3QPI Link 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
+ .driver_data = (unsigned long)&snbep_uncore_r3qpi,
+ },
+ { /* R3QPI Link 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
+ .driver_data = (unsigned long)&snbep_uncore_r3qpi,
+ },
+ { /* end: all zeroes */ }
+};
+
+static struct pci_driver snbep_uncore_pci_driver = {
+ .name = "snbep_uncore",
+ .id_table = snbep_uncore_pci_ids,
+};
+
+/*
+ * build pci bus to socket mapping
+ */
+static void snbep_pci2phy_map_init(void)
+{
+ struct pci_dev *ubox_dev = NULL;
+ int i, bus, nodeid;
+ u32 config;
+
+ while (1) {
+ /* find the UBOX device */
+ ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
+ ubox_dev);
+ if (!ubox_dev)
+ break;
+ bus = ubox_dev->bus->number;
+ /* get the Node ID of the local register */
+ pci_read_config_dword(ubox_dev, 0x40, &config);
+ nodeid = config;
+ /* get the Node ID mapping */
+ pci_read_config_dword(ubox_dev, 0x54, &config);
+ /*
+ * every three bits in the Node ID mapping register maps
+ * to a particular node.
+ */
+ for (i = 0; i < 8; i++) {
+ if (nodeid == ((config >> (3 * i)) & 0x7)) {
+ pcibus_to_physid[bus] = i;
+ break;
+ }
+ }
+ };
+ return;
+}
+/* end of Sandy Bridge-EP uncore support */
+
+
+/* Sandy Bridge uncore support */
+static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+ wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+ else
+ wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
+}
+
+static void snb_uncore_msr_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ wrmsrl(event->hw.config_base, 0);
+}
+
+static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ u64 count;
+ rdmsrl(event->hw.event_base, count);
+ return count;
+}
+
+static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0) {
+ wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+ SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
+ }
+}
+
+static struct attribute *snb_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_cmask5.attr,
+ NULL,
+};
+
+static struct attribute_group snb_uncore_format_group = {
+ .name = "format",
+ .attrs = snb_uncore_formats_attr,
+};
+
+static struct intel_uncore_ops snb_uncore_msr_ops = {
+ .init_box = snb_uncore_msr_init_box,
+ .disable_event = snb_uncore_msr_disable_event,
+ .enable_event = snb_uncore_msr_enable_event,
+ .read_counter = snb_uncore_msr_read_counter,
+};
+
+static struct event_constraint snb_uncore_cbox_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
+ EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snb_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 2,
+ .num_boxes = 4,
+ .perf_ctr_bits = 44,
+ .fixed_ctr_bits = 48,
+ .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
+ .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
+ .fixed_ctr = SNB_UNC_FIXED_CTR,
+ .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
+ .single_fixed = 1,
+ .event_mask = SNB_UNC_RAW_EVENT_MASK,
+ .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
+ .constraints = snb_uncore_cbox_constraints,
+ .ops = &snb_uncore_msr_ops,
+ .format_group = &snb_uncore_format_group,
+};
+
+static struct intel_uncore_type *snb_msr_uncores[] = {
+ &snb_uncore_cbox,
+ NULL,
+};
+/* end of Sandy Bridge uncore support */
+
+/* Nehalem uncore support */
+static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+ wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ wrmsrl(NHM_UNC_PERF_GLOBAL_CTL,
+ NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
+}
+
+static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+ wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+ else
+ wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
+}
+
+static struct attribute *nhm_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_cmask8.attr,
+ NULL,
+};
+
+static struct attribute_group nhm_uncore_format_group = {
+ .name = "format",
+ .attrs = nhm_uncore_formats_attr,
+};
+
+static struct uncore_event_desc nhm_uncore_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+ INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
+ INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
+ { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhm_uncore_msr_ops = {
+ .disable_box = nhm_uncore_msr_disable_box,
+ .enable_box = nhm_uncore_msr_enable_box,
+ .disable_event = snb_uncore_msr_disable_event,
+ .enable_event = nhm_uncore_msr_enable_event,
+ .read_counter = snb_uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type nhm_uncore = {
+ .name = "",
+ .num_counters = 8,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .fixed_ctr_bits = 48,
+ .event_ctl = NHM_UNC_PERFEVTSEL0,
+ .perf_ctr = NHM_UNC_UNCORE_PMC0,
+ .fixed_ctr = NHM_UNC_FIXED_CTR,
+ .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
+ .event_mask = NHM_UNC_RAW_EVENT_MASK,
+ .event_descs = nhm_uncore_events,
+ .ops = &nhm_uncore_msr_ops,
+ .format_group = &nhm_uncore_format_group,
+};
+
+static struct intel_uncore_type *nhm_msr_uncores[] = {
+ &nhm_uncore,
+ NULL,
+};
+/* end of Nehalem uncore support */
+
+static void uncore_assign_hw_event(struct intel_uncore_box *box,
+ struct perf_event *event, int idx)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ hwc->idx = idx;
+ hwc->last_tag = ++box->tags[idx];
+
+ if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
+ hwc->event_base = uncore_fixed_ctr(box);
+ hwc->config_base = uncore_fixed_ctl(box);
+ return;
+ }
+
+ hwc->config_base = uncore_event_ctl(box, hwc->idx);
+ hwc->event_base = uncore_perf_ctr(box, hwc->idx);
+}
+
+static void uncore_perf_event_update(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ u64 prev_count, new_count, delta;
+ int shift;
+
+ if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+ shift = 64 - uncore_fixed_ctr_bits(box);
+ else
+ shift = 64 - uncore_perf_ctr_bits(box);
+
+ /* the hrtimer might modify the previous event value */
+again:
+ prev_count = local64_read(&event->hw.prev_count);
+ new_count = uncore_read_counter(box, event);
+ if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
+ goto again;
+
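+ /*
+ * Shift both snapshots up so only the implemented counter bits are
+ * significant; the unsigned subtraction then wraps modulo the counter
+ * width, and shifting back down yields a correct delta even after the
+ * hardware counter has wrapped.
+ */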
+ delta = (new_count << shift) - (prev_count << shift);
+ delta >>= shift;
+
+ local64_add(delta, &event->count);
+}
+
+/*
+ * The overflow interrupt is unavailable for SandyBridge-EP and is
+ * broken for SandyBridge, so we use an hrtimer to periodically poll
+ * the counters to avoid overflow.
+ */
+static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
+{
+ struct intel_uncore_box *box;
+ unsigned long flags;
+ int bit;
+
+ box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
+ if (!box->n_active || box->cpu != smp_processor_id())
+ return HRTIMER_NORESTART;
+ /*
+ * disable local interrupts to prevent uncore_pmu_event_start/stop
+ * from interrupting the update process
+ */
+ local_irq_save(flags);
+
+ for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
+ uncore_perf_event_update(box, box->events[bit]);
+
+ local_irq_restore(flags);
+
+ hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
+ return HRTIMER_RESTART;
+}
+
+static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
+{
+ __hrtimer_start_range_ns(&box->hrtimer,
+ ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
+ HRTIMER_MODE_REL_PINNED, 0);
+}
+
+static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
+{
+ hrtimer_cancel(&box->hrtimer);
+}
+
+static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
+{
+ hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ box->hrtimer.function = uncore_pmu_hrtimer;
+}
+
+struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
+ int cpu)
+{
+ struct intel_uncore_box *box;
+ int i, size;
+
+ size = sizeof(*box) + type->num_shared_regs *
+ sizeof(struct intel_uncore_extra_reg);
+
+ box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
+ if (!box)
+ return NULL;
+
+ for (i = 0; i < type->num_shared_regs; i++)
+ raw_spin_lock_init(&box->shared_regs[i].lock);
+
+ uncore_pmu_init_hrtimer(box);
+ atomic_set(&box->refcnt, 1);
+ box->cpu = -1;
+ box->phys_id = -1;
+
+ return box;
+}
+
+static struct intel_uncore_box *
+uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
+{
+ struct intel_uncore_box *box;
+
+ box = *per_cpu_ptr(pmu->box, cpu);
+ if (box)
+ return box;
+
+ raw_spin_lock(&uncore_box_lock);
+ list_for_each_entry(box, &pmu->box_list, list) {
+ if (box->phys_id == topology_physical_package_id(cpu)) {
+ atomic_inc(&box->refcnt);
+ *per_cpu_ptr(pmu->box, cpu) = box;
+ break;
+ }
+ }
+ raw_spin_unlock(&uncore_box_lock);
+
+ return *per_cpu_ptr(pmu->box, cpu);
+}
+
+static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct intel_uncore_pmu, pmu);
+}
+
+static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+{
+ /*
+ * perf core schedules events on the basis of cpu; uncore events are
+ * collected by one of the cpus inside a physical package.
+ */
+ return uncore_pmu_to_box(uncore_event_to_pmu(event),
+ smp_processor_id());
+}
+
+static int uncore_collect_events(struct intel_uncore_box *box,
+ struct perf_event *leader, bool dogrp)
+{
+ struct perf_event *event;
+ int n, max_count;
+
+ max_count = box->pmu->type->num_counters;
+ if (box->pmu->type->fixed_ctl)
+ max_count++;
+
+ if (box->n_events >= max_count)
+ return -EINVAL;
+
+ n = box->n_events;
+ box->event_list[n] = leader;
+ n++;
+ if (!dogrp)
+ return n;
+
+ list_for_each_entry(event, &leader->sibling_list, group_entry) {
+ if (event->state <= PERF_EVENT_STATE_OFF)
+ continue;
+
+ if (n >= max_count)
+ return -EINVAL;
+
+ box->event_list[n] = event;
+ n++;
+ }
+ return n;
+}
+
+static struct event_constraint *
+uncore_get_event_constraint(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct intel_uncore_type *type = box->pmu->type;
+ struct event_constraint *c;
+
+ if (type->ops->get_constraint) {
+ c = type->ops->get_constraint(box, event);
+ if (c)
+ return c;
+ }
+
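+ /* fixed events are flagged with an all-ones config by uncore_pmu_event_init() */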
+ if (event->hw.config == ~0ULL)
+ return &constraint_fixed;
+
+ if (type->constraints) {
+ for_each_event_constraint(c, type->constraints) {
+ if ((event->hw.config & c->cmask) == c->code)
+ return c;
+ }
+ }
+
+ return &type->unconstrainted;
+}
+
+static void uncore_put_event_constraint(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ if (box->pmu->type->ops->put_constraint)
+ box->pmu->type->ops->put_constraint(box, event);
+}
+
+static int uncore_assign_events(struct intel_uncore_box *box,
+ int assign[], int n)
+{
+ unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
+ struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
+ int i, wmin, wmax, ret = 0;
+ struct hw_perf_event *hwc;
+
+ bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
+
+ for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
+ c = uncore_get_event_constraint(box, box->event_list[i]);
+ constraints[i] = c;
+ wmin = min(wmin, c->weight);
+ wmax = max(wmax, c->weight);
+ }
+
+ /* fastpath, try to reuse previous register */
+ for (i = 0; i < n; i++) {
+ hwc = &box->event_list[i]->hw;
+ c = constraints[i];
+
+ /* never assigned */
+ if (hwc->idx == -1)
+ break;
+
+ /* constraint still honored */
+ if (!test_bit(hwc->idx, c->idxmsk))
+ break;
+
+ /* not already used */
+ if (test_bit(hwc->idx, used_mask))
+ break;
+
+ __set_bit(hwc->idx, used_mask);
+ if (assign)
+ assign[i] = hwc->idx;
+ }
+ /* slow path */
+ if (i != n)
+ ret = perf_assign_events(constraints, n, wmin, wmax, assign);
+
+ if (!assign || ret) {
+ for (i = 0; i < n; i++)
+ uncore_put_event_constraint(box, box->event_list[i]);
+ }
+ return ret ? -EINVAL : 0;
+}
+
+static void uncore_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ int idx = event->hw.idx;
+
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
+
+ if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
+ return;
+
+ event->hw.state = 0;
+ box->events[idx] = event;
+ box->n_active++;
+ __set_bit(idx, box->active_mask);
+
+ local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
+ uncore_enable_event(box, event);
+
+ if (box->n_active == 1) {
+ uncore_enable_box(box);
+ uncore_pmu_start_hrtimer(box);
+ }
+}
+
+static void uncore_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
+ uncore_disable_event(box, event);
+ box->n_active--;
+ box->events[hwc->idx] = NULL;
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (box->n_active == 0) {
+ uncore_disable_box(box);
+ uncore_pmu_cancel_hrtimer(box);
+ }
+ }
+
+ if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ /*
+ * Drain the remaining delta count out of an event
+ * that we are disabling:
+ */
+ uncore_perf_event_update(box, event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+}
+
+static int uncore_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ struct hw_perf_event *hwc = &event->hw;
+ int assign[UNCORE_PMC_IDX_MAX];
+ int i, n, ret;
+
+ if (!box)
+ return -ENODEV;
+
+ ret = n = uncore_collect_events(box, event, false);
+ if (ret < 0)
+ return ret;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (!(flags & PERF_EF_START))
+ hwc->state |= PERF_HES_ARCH;
+
+ ret = uncore_assign_events(box, assign, n);
+ if (ret)
+ return ret;
+
+ /* save events moving to new counters */
+ for (i = 0; i < box->n_events; i++) {
+ event = box->event_list[i];
+ hwc = &event->hw;
+
+ if (hwc->idx == assign[i] &&
+ hwc->last_tag == box->tags[assign[i]])
+ continue;
+ /*
+ * Ensure we don't accidentally enable a stopped
+ * counter simply because we rescheduled.
+ */
+ if (hwc->state & PERF_HES_STOPPED)
+ hwc->state |= PERF_HES_ARCH;
+
+ uncore_pmu_event_stop(event, PERF_EF_UPDATE);
+ }
+
+ /* reprogram moved events into new counters */
+ for (i = 0; i < n; i++) {
+ event = box->event_list[i];
+ hwc = &event->hw;
+
+ if (hwc->idx != assign[i] ||
+ hwc->last_tag != box->tags[assign[i]])
+ uncore_assign_hw_event(box, event, assign[i]);
+ else if (i < box->n_events)
+ continue;
+
+ if (hwc->state & PERF_HES_ARCH)
+ continue;
+
+ uncore_pmu_event_start(event, 0);
+ }
+ box->n_events = n;
+
+ return 0;
+}
+
+static void uncore_pmu_event_del(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ int i;
+
+ uncore_pmu_event_stop(event, PERF_EF_UPDATE);
+
+ for (i = 0; i < box->n_events; i++) {
+ if (event == box->event_list[i]) {
+ uncore_put_event_constraint(box, event);
+
+ while (++i < box->n_events)
+ box->event_list[i - 1] = box->event_list[i];
+
+ --box->n_events;
+ break;
+ }
+ }
+
+ event->hw.idx = -1;
+ event->hw.last_tag = ~0ULL;
+}
+
+static void uncore_pmu_event_read(struct perf_event *event)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ uncore_perf_event_update(box, event);
+}
+
+/*
+ * validation ensures the group can be loaded onto the
+ * PMU if it was the only group available.
+ */
+static int uncore_validate_group(struct intel_uncore_pmu *pmu,
+ struct perf_event *event)
+{
+ struct perf_event *leader = event->group_leader;
+ struct intel_uncore_box *fake_box;
+ int ret = -EINVAL, n;
+
+ fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
+ if (!fake_box)
+ return -ENOMEM;
+
+ fake_box->pmu = pmu;
+ /*
+ * the event is not yet connected with its
+ * siblings; therefore we must first collect
+ * existing siblings, then add the new event
+ * before we can simulate the scheduling
+ */
+ n = uncore_collect_events(fake_box, leader, true);
+ if (n < 0)
+ goto out;
+
+ fake_box->n_events = n;
+ n = uncore_collect_events(fake_box, event, false);
+ if (n < 0)
+ goto out;
+
+ fake_box->n_events = n;
+
+ ret = uncore_assign_events(fake_box, NULL, n);
+out:
+ kfree(fake_box);
+ return ret;
+}
+
+int uncore_pmu_event_init(struct perf_event *event)
+{
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ struct hw_perf_event *hwc = &event->hw;
+ int ret;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ pmu = uncore_event_to_pmu(event);
+ /* no device found for this pmu */
+ if (pmu->func_id < 0)
+ return -ENOENT;
+
+ /*
+ * The uncore PMU measures at all privilege levels all the time,
+ * so it doesn't make sense to specify any exclude bits.
+ */
+ if (event->attr.exclude_user || event->attr.exclude_kernel ||
+ event->attr.exclude_hv || event->attr.exclude_idle)
+ return -EINVAL;
+
+ /* Sampling not supported yet */
+ if (hwc->sample_period)
+ return -EINVAL;
+
+ /*
+ * Place all uncore events for a particular physical package
+ * onto a single cpu
+ */
+ if (event->cpu < 0)
+ return -EINVAL;
+ box = uncore_pmu_to_box(pmu, event->cpu);
+ if (!box || box->cpu < 0)
+ return -EINVAL;
+ event->cpu = box->cpu;
+
+ event->hw.idx = -1;
+ event->hw.last_tag = ~0ULL;
+ event->hw.extra_reg.idx = EXTRA_REG_NONE;
+
+ if (event->attr.config == UNCORE_FIXED_EVENT) {
+ /* no fixed counter */
+ if (!pmu->type->fixed_ctl)
+ return -EINVAL;
+ /*
+ * if there is only one fixed counter, only the first pmu
+ * can access the fixed counter
+ */
+ if (pmu->type->single_fixed && pmu->pmu_idx > 0)
+ return -EINVAL;
+ hwc->config = ~0ULL;
+ } else {
+ hwc->config = event->attr.config & pmu->type->event_mask;
+ if (pmu->type->ops->hw_config) {
+ ret = pmu->type->ops->hw_config(box, event);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (event->group_leader != event)
+ ret = uncore_validate_group(pmu, event);
+ else
+ ret = 0;
+
+ return ret;
+}
+
+static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
+{
+ int ret;
+
+ pmu->pmu = (struct pmu) {
+ .attr_groups = pmu->type->attr_groups,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = uncore_pmu_event_init,
+ .add = uncore_pmu_event_add,
+ .del = uncore_pmu_event_del,
+ .start = uncore_pmu_event_start,
+ .stop = uncore_pmu_event_stop,
+ .read = uncore_pmu_event_read,
+ };
+
+ if (pmu->type->num_boxes == 1) {
+ if (strlen(pmu->type->name) > 0)
+ sprintf(pmu->name, "uncore_%s", pmu->type->name);
+ else
+ sprintf(pmu->name, "uncore");
+ } else {
+ sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
+ pmu->pmu_idx);
+ }
+
+ ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
+ return ret;
+}
+
+static void __init uncore_type_exit(struct intel_uncore_type *type)
+{
+ int i;
+
+ for (i = 0; i < type->num_boxes; i++)
+ free_percpu(type->pmus[i].box);
+ kfree(type->pmus);
+ type->pmus = NULL;
+ kfree(type->attr_groups[1]);
+ type->attr_groups[1] = NULL;
+}
+
+static void uncore_types_exit(struct intel_uncore_type **types)
+{
+ int i;
+ for (i = 0; types[i]; i++)
+ uncore_type_exit(types[i]);
+}
+
+static int __init uncore_type_init(struct intel_uncore_type *type)
+{
+ struct intel_uncore_pmu *pmus;
+ struct attribute_group *events_group;
+ struct attribute **attrs;
+ int i, j;
+
+ pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
+ if (!pmus)
+ return -ENOMEM;
+
+ type->unconstrainted = (struct event_constraint)
+ __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
+ 0, type->num_counters, 0);
+
+ for (i = 0; i < type->num_boxes; i++) {
+ pmus[i].func_id = -1;
+ pmus[i].pmu_idx = i;
+ pmus[i].type = type;
+ INIT_LIST_HEAD(&pmus[i].box_list);
+ pmus[i].box = alloc_percpu(struct intel_uncore_box *);
+ if (!pmus[i].box)
+ goto fail;
+ }
+
+ if (type->event_descs) {
+ i = 0;
+ while (type->event_descs[i].attr.attr.name)
+ i++;
+
+ events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
+ sizeof(*events_group), GFP_KERNEL);
+ if (!events_group)
+ goto fail;
+
+ attrs = (struct attribute **)(events_group + 1);
+ events_group->name = "events";
+ events_group->attrs = attrs;
+
+ for (j = 0; j < i; j++)
+ attrs[j] = &type->event_descs[j].attr.attr;
+
+ type->attr_groups[1] = events_group;
+ }
+
+ type->pmus = pmus;
+ return 0;
+fail:
+ uncore_type_exit(type);
+ return -ENOMEM;
+}
+
+static int __init uncore_types_init(struct intel_uncore_type **types)
+{
+ int i, ret;
+
+ for (i = 0; types[i]; i++) {
+ ret = uncore_type_init(types[i]);
+ if (ret)
+ goto fail;
+ }
+ return 0;
+fail:
+ while (--i >= 0)
+ uncore_type_exit(types[i]);
+ return ret;
+}
+
+static struct pci_driver *uncore_pci_driver;
+static bool pcidrv_registered;
+
+/*
+ * add a pci uncore device
+ */
+static int __devinit uncore_pci_add(struct intel_uncore_type *type,
+ struct pci_dev *pdev)
+{
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, phys_id;
+
+ phys_id = pcibus_to_physid[pdev->bus->number];
+ if (phys_id < 0)
+ return -ENODEV;
+
+ box = uncore_alloc_box(type, 0);
+ if (!box)
+ return -ENOMEM;
+
+ /*
+ * For a performance monitoring unit with multiple boxes,
+ * each box has a different function id.
+ */
+ for (i = 0; i < type->num_boxes; i++) {
+ pmu = &type->pmus[i];
+ if (pmu->func_id == pdev->devfn)
+ break;
+ if (pmu->func_id < 0) {
+ pmu->func_id = pdev->devfn;
+ break;
+ }
+ pmu = NULL;
+ }
+
+ if (!pmu) {
+ kfree(box);
+ return -EINVAL;
+ }
+
+ box->phys_id = phys_id;
+ box->pci_dev = pdev;
+ box->pmu = pmu;
+ uncore_box_init(box);
+ pci_set_drvdata(pdev, box);
+
+ raw_spin_lock(&uncore_box_lock);
+ list_add_tail(&box->list, &pmu->box_list);
+ raw_spin_unlock(&uncore_box_lock);
+
+ return 0;
+}
+
+static void uncore_pci_remove(struct pci_dev *pdev)
+{
+ struct intel_uncore_box *box = pci_get_drvdata(pdev);
+ struct intel_uncore_pmu *pmu = box->pmu;
+ int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
+
+ if (WARN_ON_ONCE(phys_id != box->phys_id))
+ return;
+
+ raw_spin_lock(&uncore_box_lock);
+ list_del(&box->list);
+ raw_spin_unlock(&uncore_box_lock);
+
+ for_each_possible_cpu(cpu) {
+ if (*per_cpu_ptr(pmu->box, cpu) == box) {
+ *per_cpu_ptr(pmu->box, cpu) = NULL;
+ atomic_dec(&box->refcnt);
+ }
+ }
+
+ WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
+ kfree(box);
+}
+
+static int __devinit uncore_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct intel_uncore_type *type;
+
+ type = (struct intel_uncore_type *)id->driver_data;
+ return uncore_pci_add(type, pdev);
+}
+
+static int __init uncore_pci_init(void)
+{
+ int ret;
+
+ switch (boot_cpu_data.x86_model) {
+ case 45: /* Sandy Bridge-EP */
+ pci_uncores = snbep_pci_uncores;
+ uncore_pci_driver = &snbep_uncore_pci_driver;
+ snbep_pci2phy_map_init();
+ break;
+ default:
+ return 0;
+ }
+
+ ret = uncore_types_init(pci_uncores);
+ if (ret)
+ return ret;
+
+ uncore_pci_driver->probe = uncore_pci_probe;
+ uncore_pci_driver->remove = uncore_pci_remove;
+
+ ret = pci_register_driver(uncore_pci_driver);
+ if (ret == 0)
+ pcidrv_registered = true;
+ else
+ uncore_types_exit(pci_uncores);
+
+ return ret;
+}
+
+static void __init uncore_pci_exit(void)
+{
+ if (pcidrv_registered) {
+ pcidrv_registered = false;
+ pci_unregister_driver(uncore_pci_driver);
+ uncore_types_exit(pci_uncores);
+ }
+}
+
+static void __cpuinit uncore_cpu_dying(int cpu)
+{
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, j;
+
+ for (i = 0; msr_uncores[i]; i++) {
+ type = msr_uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ box = *per_cpu_ptr(pmu->box, cpu);
+ *per_cpu_ptr(pmu->box, cpu) = NULL;
+ if (box && atomic_dec_and_test(&box->refcnt))
+ kfree(box);
+ }
+ }
+}
+
+static int __cpuinit uncore_cpu_starting(int cpu)
+{
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box, *exist;
+ int i, j, k, phys_id;
+
+ phys_id = topology_physical_package_id(cpu);
+
+ for (i = 0; msr_uncores[i]; i++) {
+ type = msr_uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ box = *per_cpu_ptr(pmu->box, cpu);
+ /* called by uncore_cpu_init? */
+ if (box && box->phys_id >= 0) {
+ uncore_box_init(box);
+ continue;
+ }
+
+ for_each_online_cpu(k) {
+ exist = *per_cpu_ptr(pmu->box, k);
+ if (exist && exist->phys_id == phys_id) {
+ atomic_inc(&exist->refcnt);
+ *per_cpu_ptr(pmu->box, cpu) = exist;
+ kfree(box);
+ box = NULL;
+ break;
+ }
+ }
+
+ if (box) {
+ box->phys_id = phys_id;
+ uncore_box_init(box);
+ }
+ }
+ }
+ return 0;
+}
+
+static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
+{
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, j;
+
+ for (i = 0; msr_uncores[i]; i++) {
+ type = msr_uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ if (pmu->func_id < 0)
+ pmu->func_id = j;
+
+ box = uncore_alloc_box(type, cpu);
+ if (!box)
+ return -ENOMEM;
+
+ box->pmu = pmu;
+ box->phys_id = phys_id;
+ *per_cpu_ptr(pmu->box, cpu) = box;
+ }
+ }
+ return 0;
+}
+
+static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores,
+ int old_cpu, int new_cpu)
+{
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, j;
+
+ for (i = 0; uncores[i]; i++) {
+ type = uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ if (old_cpu < 0)
+ box = uncore_pmu_to_box(pmu, new_cpu);
+ else
+ box = uncore_pmu_to_box(pmu, old_cpu);
+ if (!box)
+ continue;
+
+ if (old_cpu < 0) {
+ WARN_ON_ONCE(box->cpu != -1);
+ box->cpu = new_cpu;
+ continue;
+ }
+
+ WARN_ON_ONCE(box->cpu != old_cpu);
+ if (new_cpu >= 0) {
+ uncore_pmu_cancel_hrtimer(box);
+ perf_pmu_migrate_context(&pmu->pmu,
+ old_cpu, new_cpu);
+ box->cpu = new_cpu;
+ } else {
+ box->cpu = -1;
+ }
+ }
+ }
+}
+
+static void __cpuinit uncore_event_exit_cpu(int cpu)
+{
+ int i, phys_id, target;
+
+ /* if exiting cpu is used for collecting uncore events */
+ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+ return;
+
+ /* find a new cpu to collect uncore events */
+ phys_id = topology_physical_package_id(cpu);
+ target = -1;
+ for_each_online_cpu(i) {
+ if (i == cpu)
+ continue;
+ if (phys_id == topology_physical_package_id(i)) {
+ target = i;
+ break;
+ }
+ }
+
+ /* migrate uncore events to the new cpu */
+ if (target >= 0)
+ cpumask_set_cpu(target, &uncore_cpu_mask);
+
+ uncore_change_context(msr_uncores, cpu, target);
+ uncore_change_context(pci_uncores, cpu, target);
+}
+
+static void __cpuinit uncore_event_init_cpu(int cpu)
+{
+ int i, phys_id;
+
+ phys_id = topology_physical_package_id(cpu);
+ for_each_cpu(i, &uncore_cpu_mask) {
+ if (phys_id == topology_physical_package_id(i))
+ return;
+ }
+
+ cpumask_set_cpu(cpu, &uncore_cpu_mask);
+
+ uncore_change_context(msr_uncores, -1, cpu);
+ uncore_change_context(pci_uncores, -1, cpu);
+}
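The two helpers above maintain the invariant that uncore_cpu_mask holds exactly one online CPU per physical package. A standalone sketch of the same selection policy, using a made-up cpu-to-package mapping purely for illustration:

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8

/* placeholder topology: cpu -> physical package id */
static const int pkg_of_cpu[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

int main(void)
{
	bool collector[NR_CPUS] = { false };
	int cpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		bool found = false;

		/* skip the package if it already has a collector cpu */
		for (i = 0; i < NR_CPUS; i++) {
			if (collector[i] && pkg_of_cpu[i] == pkg_of_cpu[cpu]) {
				found = true;
				break;
			}
		}
		if (!found)
			collector[cpu] = true;
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (collector[cpu])
			printf("cpu %d collects uncore events for package %d\n",
			       cpu, pkg_of_cpu[cpu]);
	return 0;
}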
+
+static int __cpuinit uncore_cpu_notifier(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ /* allocate/free data structure for uncore box */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ uncore_cpu_prepare(cpu, -1);
+ break;
+ case CPU_STARTING:
+ uncore_cpu_starting(cpu);
+ break;
+ case CPU_UP_CANCELED:
+ case CPU_DYING:
+ uncore_cpu_dying(cpu);
+ break;
+ default:
+ break;
+ }
+
+ /* select the cpu that collects uncore events */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DOWN_FAILED:
+ case CPU_STARTING:
+ uncore_event_init_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ uncore_event_exit_cpu(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block uncore_cpu_nb __cpuinitdata = {
+ .notifier_call = uncore_cpu_notifier,
+ /*
+ * to migrate uncore events, our notifier should be executed
+ * before perf core's notifier.
+ */
+ .priority = CPU_PRI_PERF + 1,
+};
+
+static void __init uncore_cpu_setup(void *dummy)
+{
+ uncore_cpu_starting(smp_processor_id());
+}
+
+static int __init uncore_cpu_init(void)
+{
+ int ret, cpu, max_cores;
+
+ max_cores = boot_cpu_data.x86_max_cores;
+ switch (boot_cpu_data.x86_model) {
+ case 26: /* Nehalem */
+ case 30:
+ case 37: /* Westmere */
+ case 44:
+ msr_uncores = nhm_msr_uncores;
+ break;
+ case 42: /* Sandy Bridge */
+ if (snb_uncore_cbox.num_boxes > max_cores)
+ snb_uncore_cbox.num_boxes = max_cores;
+ msr_uncores = snb_msr_uncores;
+ break;
+ case 45: /* Sandy Bridge-EP */
+ if (snbep_uncore_cbox.num_boxes > max_cores)
+ snbep_uncore_cbox.num_boxes = max_cores;
+ msr_uncores = snbep_msr_uncores;
+ break;
+ default:
+ return 0;
+ }
+
+ ret = uncore_types_init(msr_uncores);
+ if (ret)
+ return ret;
+
+ get_online_cpus();
+
+ for_each_online_cpu(cpu) {
+ int i, phys_id = topology_physical_package_id(cpu);
+
+ for_each_cpu(i, &uncore_cpu_mask) {
+ if (phys_id == topology_physical_package_id(i)) {
+ phys_id = -1;
+ break;
+ }
+ }
+ if (phys_id < 0)
+ continue;
+
+ uncore_cpu_prepare(cpu, phys_id);
+ uncore_event_init_cpu(cpu);
+ }
+ on_each_cpu(uncore_cpu_setup, NULL, 1);
+
+ register_cpu_notifier(&uncore_cpu_nb);
+
+ put_online_cpus();
+
+ return 0;
+}
+
+static int __init uncore_pmus_register(void)
+{
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_type *type;
+ int i, j;
+
+ for (i = 0; msr_uncores[i]; i++) {
+ type = msr_uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ uncore_pmu_register(pmu);
+ }
+ }
+
+ for (i = 0; pci_uncores[i]; i++) {
+ type = pci_uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ uncore_pmu_register(pmu);
+ }
+ }
+
+ return 0;
+}
+
+static int __init intel_uncore_init(void)
+{
+ int ret;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
+ ret = uncore_pci_init();
+ if (ret)
+ goto fail;
+ ret = uncore_cpu_init();
+ if (ret) {
+ uncore_pci_exit();
+ goto fail;
+ }
+
+ uncore_pmus_register();
+ return 0;
+fail:
+ return ret;
+}
+device_initcall(intel_uncore_init);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
new file mode 100644
index 000000000000..b13e9ea81def
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -0,0 +1,424 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/perf_event.h>
+#include "perf_event.h"
+
+#define UNCORE_PMU_NAME_LEN 32
+#define UNCORE_BOX_HASH_SIZE 8
+
+#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC)
+
+#define UNCORE_FIXED_EVENT 0xff
+#define UNCORE_PMC_IDX_MAX_GENERIC 8
+#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
+#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1)
+
+#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
+
+/* SNB event control */
+#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
+#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
+#define SNB_UNC_CTL_EDGE_DET (1 << 18)
+#define SNB_UNC_CTL_EN (1 << 22)
+#define SNB_UNC_CTL_INVERT (1 << 23)
+#define SNB_UNC_CTL_CMASK_MASK 0x1f000000
+#define NHM_UNC_CTL_CMASK_MASK 0xff000000
+#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0)
+
+#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
+ SNB_UNC_CTL_UMASK_MASK | \
+ SNB_UNC_CTL_EDGE_DET | \
+ SNB_UNC_CTL_INVERT | \
+ SNB_UNC_CTL_CMASK_MASK)
+
+#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
+ SNB_UNC_CTL_UMASK_MASK | \
+ SNB_UNC_CTL_EDGE_DET | \
+ SNB_UNC_CTL_INVERT | \
+ NHM_UNC_CTL_CMASK_MASK)
+
+/* SNB global control register */
+#define SNB_UNC_PERF_GLOBAL_CTL 0x391
+#define SNB_UNC_FIXED_CTR_CTRL 0x394
+#define SNB_UNC_FIXED_CTR 0x395
+
+/* SNB uncore global control */
+#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1)
+#define SNB_UNC_GLOBAL_CTL_EN (1 << 29)
+
+/* SNB Cbo register */
+#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700
+#define SNB_UNC_CBO_0_PER_CTR0 0x706
+#define SNB_UNC_CBO_MSR_OFFSET 0x10
+
+/* NHM global control register */
+#define NHM_UNC_PERF_GLOBAL_CTL 0x391
+#define NHM_UNC_FIXED_CTR 0x394
+#define NHM_UNC_FIXED_CTR_CTRL 0x395
+
+/* NHM uncore global control */
+#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1)
+#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32)
+
+/* NHM uncore register */
+#define NHM_UNC_PERFEVTSEL0 0x3c0
+#define NHM_UNC_UNCORE_PMC0 0x3b0
+
+/* SNB-EP Box level control */
+#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
+#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
+#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
+#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
+#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
+ SNBEP_PMON_BOX_CTL_RST_CTRS | \
+ SNBEP_PMON_BOX_CTL_FRZ_EN)
+/* SNB-EP event control */
+#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
+#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
+#define SNBEP_PMON_CTL_RST (1 << 17)
+#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
+#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) /* only for QPI */
+#define SNBEP_PMON_CTL_EN (1 << 22)
+#define SNBEP_PMON_CTL_INVERT (1 << 23)
+#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
+#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_UMASK_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_INVERT | \
+ SNBEP_PMON_CTL_TRESH_MASK)
+
+/* SNB-EP Ubox event control */
+#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
+#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_UMASK_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_INVERT | \
+ SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
+
+#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
+#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
+ SNBEP_CBO_PMON_CTL_TID_EN)
+
+/* SNB-EP PCU event control */
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
+#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
+#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_INVERT | \
+ SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
+
+/* SNB-EP pci control register */
+#define SNBEP_PCI_PMON_BOX_CTL 0xf4
+#define SNBEP_PCI_PMON_CTL0 0xd8
+/* SNB-EP pci counter register */
+#define SNBEP_PCI_PMON_CTR0 0xa0
+
+/* SNB-EP home agent register */
+#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
+#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
+#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
+/* SNB-EP memory controller register */
+#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
+#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
+/* SNB-EP QPI register */
+#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
+#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
+#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
+#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
+
+/* SNB-EP Ubox register */
+#define SNBEP_U_MSR_PMON_CTR0 0xc16
+#define SNBEP_U_MSR_PMON_CTL0 0xc10
+
+#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
+#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
+
+/* SNB-EP Cbo register */
+#define SNBEP_C0_MSR_PMON_CTR0 0xd16
+#define SNBEP_C0_MSR_PMON_CTL0 0xd10
+#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
+#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK 0xfffffc1f
+#define SNBEP_CBO_MSR_OFFSET 0x20
+
+/* SNB-EP PCU register */
+#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
+#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
+#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
+#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
+#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
+#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
+#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
+
+struct intel_uncore_ops;
+struct intel_uncore_pmu;
+struct intel_uncore_box;
+struct uncore_event_desc;
+
+struct intel_uncore_type {
+ const char *name;
+ int num_counters;
+ int num_boxes;
+ int perf_ctr_bits;
+ int fixed_ctr_bits;
+ unsigned perf_ctr;
+ unsigned event_ctl;
+ unsigned event_mask;
+ unsigned fixed_ctr;
+ unsigned fixed_ctl;
+ unsigned box_ctl;
+ unsigned msr_offset;
+ unsigned num_shared_regs:8;
+ unsigned single_fixed:1;
+ struct event_constraint unconstrainted;
+ struct event_constraint *constraints;
+ struct intel_uncore_pmu *pmus;
+ struct intel_uncore_ops *ops;
+ struct uncore_event_desc *event_descs;
+ const struct attribute_group *attr_groups[3];
+};
+
+#define format_group attr_groups[0]
+
+struct intel_uncore_ops {
+ void (*init_box)(struct intel_uncore_box *);
+ void (*disable_box)(struct intel_uncore_box *);
+ void (*enable_box)(struct intel_uncore_box *);
+ void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
+ void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
+ u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
+ int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
+ struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
+ struct perf_event *);
+ void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
+};
+
+struct intel_uncore_pmu {
+ struct pmu pmu;
+ char name[UNCORE_PMU_NAME_LEN];
+ int pmu_idx;
+ int func_id;
+ struct intel_uncore_type *type;
+ struct intel_uncore_box ** __percpu box;
+ struct list_head box_list;
+};
+
+struct intel_uncore_extra_reg {
+ raw_spinlock_t lock;
+ u64 config1;
+ atomic_t ref;
+};
+
+struct intel_uncore_box {
+ int phys_id;
+ int n_active; /* number of active events */
+ int n_events;
+ int cpu; /* cpu to collect events */
+ unsigned long flags;
+ atomic_t refcnt;
+ struct perf_event *events[UNCORE_PMC_IDX_MAX];
+ struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
+ unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
+ u64 tags[UNCORE_PMC_IDX_MAX];
+ struct pci_dev *pci_dev;
+ struct intel_uncore_pmu *pmu;
+ struct hrtimer hrtimer;
+ struct list_head list;
+ struct intel_uncore_extra_reg shared_regs[0];
+};
+
+#define UNCORE_BOX_FLAG_INITIATED 0
+
+struct uncore_event_desc {
+ struct kobj_attribute attr;
+ const char *config;
+};
+
+#define INTEL_UNCORE_EVENT_DESC(_name, _config) \
+{ \
+ .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \
+ .config = _config, \
+}
+
+#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
+static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
+ return sprintf(page, _format "\n"); \
+} \
+static struct kobj_attribute format_attr_##_var = \
+ __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
+
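Typical usage of the macro: each PMU type describes its raw config layout to userspace through "format" attributes. The field ranges below are illustrative and mirror the SNB_UNC_CTL_* masks defined above; the attribute array name is a placeholder.

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};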
+
+static ssize_t uncore_event_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct uncore_event_desc *event =
+ container_of(attr, struct uncore_event_desc, attr);
+ return sprintf(buf, "%s", event->config);
+}
+
+static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
+{
+ return box->pmu->type->box_ctl;
+}
+
+static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
+{
+ return box->pmu->type->fixed_ctl;
+}
+
+static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
+{
+ return box->pmu->type->fixed_ctr;
+}
+
+static inline
+unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
+{
+ return idx * 4 + box->pmu->type->event_ctl;
+}
+
+static inline
+unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+ return idx * 8 + box->pmu->type->perf_ctr;
+}
+
+static inline
+unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
+{
+ if (!box->pmu->type->box_ctl)
+ return 0;
+ return box->pmu->type->box_ctl +
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
+{
+ if (!box->pmu->type->fixed_ctl)
+ return 0;
+ return box->pmu->type->fixed_ctl +
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
+{
+ return box->pmu->type->fixed_ctr +
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
+{
+ return idx + box->pmu->type->event_ctl +
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+ return idx + box->pmu->type->perf_ctr +
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
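A worked example of the address arithmetic above, assuming an SNB C-box type set up with event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, perf_ctr = SNB_UNC_CBO_0_PER_CTR0 and msr_offset = SNB_UNC_CBO_MSR_OFFSET; pmu_idx selects the box, idx the counter within it:

/*
 * For pmu_idx = 1 (second C-box) and idx = 2 (third generic counter):
 *
 *   event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0 + idx + SNB_UNC_CBO_MSR_OFFSET * 1
 *             = 0x700 + 2 + 0x10 = 0x712
 *   perf_ctr  = SNB_UNC_CBO_0_PER_CTR0    + idx + SNB_UNC_CBO_MSR_OFFSET * 1
 *             = 0x706 + 2 + 0x10 = 0x718
 */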
+
+static inline
+unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
+{
+ if (box->pci_dev)
+ return uncore_pci_fixed_ctl(box);
+ else
+ return uncore_msr_fixed_ctl(box);
+}
+
+static inline
+unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
+{
+ if (box->pci_dev)
+ return uncore_pci_fixed_ctr(box);
+ else
+ return uncore_msr_fixed_ctr(box);
+}
+
+static inline
+unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
+{
+ if (box->pci_dev)
+ return uncore_pci_event_ctl(box, idx);
+ else
+ return uncore_msr_event_ctl(box, idx);
+}
+
+static inline
+unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+ if (box->pci_dev)
+ return uncore_pci_perf_ctr(box, idx);
+ else
+ return uncore_msr_perf_ctr(box, idx);
+}
+
+static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
+{
+ return box->pmu->type->perf_ctr_bits;
+}
+
+static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
+{
+ return box->pmu->type->fixed_ctr_bits;
+}
+
+static inline int uncore_num_counters(struct intel_uncore_box *box)
+{
+ return box->pmu->type->num_counters;
+}
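The *_ctr_bits() helpers exist because uncore counters are narrower than 64 bits, so deltas have to be folded modulo the counter width; the hrtimer interval defined above gives the driver a periodic chance to read counters before they can wrap. A generic, self-contained sketch of that folding technique (not code from the patch):

#include <stdio.h>
#include <stdint.h>

/* accumulate a possibly-wrapped hardware counter into a 64-bit total */
static uint64_t update_count(uint64_t *prev, uint64_t now, int ctr_bits)
{
	uint64_t mask = (ctr_bits < 64) ? (1ULL << ctr_bits) - 1 : ~0ULL;
	uint64_t delta = (now - *prev) & mask;

	*prev = now;
	return delta;
}

int main(void)
{
	uint64_t prev = 0xffffffffff00ULL;	/* close to a 48-bit wrap */
	uint64_t total = 0;

	total += update_count(&prev, 0x000000000100ULL, 48);
	printf("delta across wrap: %llu\n", (unsigned long long)total);
	return 0;
}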
+
+static inline void uncore_disable_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->type->ops->disable_box)
+ box->pmu->type->ops->disable_box(box);
+}
+
+static inline void uncore_enable_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->type->ops->enable_box)
+ box->pmu->type->ops->enable_box(box);
+}
+
+static inline void uncore_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ box->pmu->type->ops->disable_event(box, event);
+}
+
+static inline void uncore_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ box->pmu->type->ops->enable_event(box, event);
+}
+
+static inline u64 uncore_read_counter(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ return box->pmu->type->ops->read_counter(box, event);
+}
+
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+ if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+ if (box->pmu->type->ops->init_box)
+ box->pmu->type->ops->init_box(box);
+ }
+}
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 47124a73dd73..92c7e39a079f 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
* So at moment let leave metrics turned on forever -- it's
* ok for now but need to be revisited!
*
- * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
- * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
+ * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0);
+ * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
*/
}
@@ -909,7 +909,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
* state we need to clear P4_CCCR_OVF, otherwise interrupt get
* asserted again and again
*/
- (void)checking_wrmsrl(hwc->config_base,
+ (void)wrmsrl_safe(hwc->config_base,
(u64)(p4_config_unpack_cccr(hwc->config)) &
~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
@@ -943,8 +943,8 @@ static void p4_pmu_enable_pebs(u64 config)
bind = &p4_pebs_bind_map[idx];
- (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
- (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
+ (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
+ (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
}
static void p4_pmu_enable_event(struct perf_event *event)
@@ -978,8 +978,8 @@ static void p4_pmu_enable_event(struct perf_event *event)
*/
p4_pmu_enable_pebs(hwc->config);
- (void)checking_wrmsrl(escr_addr, escr_conf);
- (void)checking_wrmsrl(hwc->config_base,
+ (void)wrmsrl_safe(escr_addr, escr_conf);
+ (void)wrmsrl_safe(hwc->config_base,
(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}
@@ -1325,7 +1325,7 @@ __init int p4_pmu_init(void)
unsigned int low, high;
/* If we get stripped -- indexing fails */
- BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
+ BUILD_BUG_ON(ARCH_P4_MAX_CCCR > INTEL_PMC_MAX_GENERIC);
rdmsr(MSR_IA32_MISC_ENABLE, low, high);
if (!(low & (1 << 7))) {
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 32bcfc7dd230..e4dd0f7a0453 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -71,7 +71,7 @@ p6_pmu_disable_event(struct perf_event *event)
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- (void)checking_wrmsrl(hwc->config_base, val);
+ (void)wrmsrl_safe(hwc->config_base, val);
}
static void p6_pmu_enable_event(struct perf_event *event)
@@ -84,7 +84,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- (void)checking_wrmsrl(hwc->config_base, val);
+ (void)wrmsrl_safe(hwc->config_base, val);
}
PMU_FORMAT_ATTR(event, "config:0-7" );
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index addf9e82a7f2..ee8e9abc859f 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,7 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
const struct cpuid_bit *cb;
static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
- { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 },
+ { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
{ X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 8bfb6146f753..3f61904365cf 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -444,12 +444,12 @@ void kgdb_roundup_cpus(unsigned long flags)
/**
* kgdb_arch_handle_exception - Handle architecture specific GDB packets.
- * @vector: The error vector of the exception that happened.
+ * @e_vector: The error vector of the exception that happened.
* @signo: The signal number of the exception that happened.
* @err_code: The error code of the exception that happened.
- * @remcom_in_buffer: The buffer of the packet we have read.
- * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
- * @regs: The &struct pt_regs of the current process.
+ * @remcomInBuffer: The buffer of the packet we have read.
+ * @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
+ * @linux_regs: The &struct pt_regs of the current process.
*
* This function MUST handle the 'c' and 's' command packets,
* as well packets to set / remove a hardware breakpoint, if used.
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 086eb58c6e80..f1b42b3a186c 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -120,11 +120,6 @@ bool kvm_check_and_clear_guest_paused(void)
bool ret = false;
struct pvclock_vcpu_time_info *src;
- /*
- * per_cpu() is safe here because this function is only called from
- * timer functions where preemption is already disabled.
- */
- WARN_ON(!in_atomic());
src = &__get_cpu_var(hv_clock);
if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
__this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index fbdfc6917180..1649cf899ad6 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -87,6 +87,7 @@
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
+#include <asm/perf_event.h>
MODULE_DESCRIPTION("Microcode Update Driver");
MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
@@ -277,7 +278,6 @@ static int reload_for_cpu(int cpu)
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
int err = 0;
- mutex_lock(&microcode_mutex);
if (uci->valid) {
enum ucode_state ustate;
@@ -288,7 +288,6 @@ static int reload_for_cpu(int cpu)
if (ustate == UCODE_ERROR)
err = -EINVAL;
}
- mutex_unlock(&microcode_mutex);
return err;
}
@@ -298,19 +297,31 @@ static ssize_t reload_store(struct device *dev,
const char *buf, size_t size)
{
unsigned long val;
- int cpu = dev->id;
- ssize_t ret = 0;
+ int cpu;
+ ssize_t ret = 0, tmp_ret;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
- if (val == 1) {
- get_online_cpus();
- if (cpu_online(cpu))
- ret = reload_for_cpu(cpu);
- put_online_cpus();
+ if (val != 1)
+ return size;
+
+ get_online_cpus();
+ mutex_lock(&microcode_mutex);
+ for_each_online_cpu(cpu) {
+ tmp_ret = reload_for_cpu(cpu);
+ if (tmp_ret != 0)
+ pr_warn("Error reloading microcode on CPU %d\n", cpu);
+
+ /* save retval of the first encountered reload error */
+ if (!ret)
+ ret = tmp_ret;
}
+ if (!ret)
+ perf_check_microcode();
+ mutex_unlock(&microcode_mutex);
+ put_online_cpus();
if (!ret)
ret = size;
@@ -339,7 +350,6 @@ static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
static struct attribute *mc_default_attrs[] = {
- &dev_attr_reload.attr,
&dev_attr_version.attr,
&dev_attr_processor_flags.attr,
NULL
@@ -516,6 +526,16 @@ static const struct x86_cpu_id microcode_id[] = {
MODULE_DEVICE_TABLE(x86cpu, microcode_id);
#endif
+static struct attribute *cpu_root_microcode_attrs[] = {
+ &dev_attr_reload.attr,
+ NULL
+};
+
+static struct attribute_group cpu_root_microcode_group = {
+ .name = "microcode",
+ .attrs = cpu_root_microcode_attrs,
+};
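With the reload attribute moved into this group, the interface lives at /sys/devices/system/cpu/microcode/reload and acts on all online CPUs at once; a minimal sketch of triggering it from userspace (needs root):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/microcode/reload", "w");

	if (!f) {
		perror("reload");
		return 1;
	}
	/* reload_store() accepts but ignores any value other than "1" */
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}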
+
static int __init microcode_init(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
@@ -540,16 +560,25 @@ static int __init microcode_init(void)
mutex_lock(&microcode_mutex);
error = subsys_interface_register(&mc_cpu_interface);
-
+ if (!error)
+ perf_check_microcode();
mutex_unlock(&microcode_mutex);
put_online_cpus();
if (error)
goto out_pdev;
+ error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpu_root_microcode_group);
+
+ if (error) {
+ pr_err("Error creating microcode group!\n");
+ goto out_driver;
+ }
+
error = microcode_dev_init();
if (error)
- goto out_driver;
+ goto out_ucode_group;
register_syscore_ops(&mc_syscore_ops);
register_hotcpu_notifier(&mc_cpu_notifier);
@@ -559,7 +588,11 @@ static int __init microcode_init(void)
return 0;
-out_driver:
+ out_ucode_group:
+ sysfs_remove_group(&cpu_subsys.dev_root->kobj,
+ &cpu_root_microcode_group);
+
+ out_driver:
get_online_cpus();
mutex_lock(&microcode_mutex);
@@ -568,7 +601,7 @@ out_driver:
mutex_unlock(&microcode_mutex);
put_online_cpus();
-out_pdev:
+ out_pdev:
platform_device_unregister(microcode_pdev);
return error;
@@ -584,6 +617,9 @@ static void __exit microcode_exit(void)
unregister_hotcpu_notifier(&mc_cpu_notifier);
unregister_syscore_ops(&mc_syscore_ops);
+ sysfs_remove_group(&cpu_subsys.dev_root->kobj,
+ &cpu_root_microcode_group);
+
get_online_cpus();
mutex_lock(&microcode_mutex);
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index e31bf8d5c4d2..149b8d9c6ad4 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -42,7 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs)
static void __init init_nmi_testsuite(void)
{
/* trap all the unknown NMIs we may generate */
- register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
+ register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
}
static void __init cleanup_nmi_testsuite(void)
@@ -64,7 +64,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
{
unsigned long timeout;
- if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
+ if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback,
NMI_FLAG_FIRST, "nmi_selftest")) {
nmi_fail = FAILURE;
return;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 9ce885996fd7..17fff18a1031 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -352,9 +352,7 @@ struct pv_cpu_ops pv_cpu_ops = {
#endif
.wbinvd = native_wbinvd,
.read_msr = native_read_msr_safe,
- .rdmsr_regs = native_rdmsr_safe_regs,
.write_msr = native_write_msr_safe,
- .wrmsr_regs = native_wrmsr_safe_regs,
.read_tsc = native_read_tsc,
.read_pmc = native_read_pmc,
.read_tscp = native_read_tscp,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 62c9457ccd2f..c0f420f76cd3 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
struct dma_attrs *attrs)
{
unsigned long dma_mask;
- struct page *page = NULL;
+ struct page *page;
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
dma_addr_t addr;
@@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
flag |= __GFP_ZERO;
again:
+ page = NULL;
if (!(flag & GFP_ATOMIC))
page = dma_alloc_from_contiguous(dev, count, get_order(size));
if (!page)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 61cdf7fdf099..3e215ba68766 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -466,7 +466,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
task->thread.gs = addr;
if (doit) {
load_gs_index(0);
- ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
+ ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
}
}
put_cpu();
@@ -494,7 +494,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
/* set the selector to 0 to not confuse
__switch_to */
loadsegment(fs, 0);
- ret = checking_wrmsrl(MSR_FS_BASE, addr);
+ ret = wrmsrl_safe(MSR_FS_BASE, addr);
}
}
put_cpu();
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 79c45af81604..5de92f1abd76 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -451,6 +451,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
},
},
+ { /* Handle problems with rebooting on the Precision M6600. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Precision M6600",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+ },
+ },
{ }
};
@@ -639,9 +647,11 @@ void native_machine_shutdown(void)
set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
/*
- * O.K Now that I'm on the appropriate processor,
- * stop all of the others.
+ * O.K Now that I'm on the appropriate processor, stop all of the
+ * others. Also disable local irqs so we don't receive the per-cpu
+ * timer interrupt, which may trigger the scheduler's load balancing.
*/
+ local_irq_disable();
stop_other_cpus();
#endif
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index b2fd28ff84b5..27e2eeff7a4b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -340,9 +340,12 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
- if (c->phys_proc_id == o->phys_proc_id)
- return topology_sane(c, o, "mc");
+ if (c->phys_proc_id == o->phys_proc_id) {
+ if (cpu_has(c, X86_FEATURE_AMD_DCM))
+ return true;
+ return topology_sane(c, o, "mc");
+ }
return false;
}
@@ -373,6 +376,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
if ((i == cpu) || (has_mc && match_llc(c, o)))
link_mask(llc_shared, cpu, i);
+ }
+
+ /*
+ * This needs a separate iteration over the cpus because we rely on all
+ * cpu_sibling_mask links to be set-up.
+ */
+ for_each_cpu(i, cpu_sibling_setup_mask) {
+ o = &cpu_data(i);
+
if ((i == cpu) || (has_mc && match_mc(c, o))) {
link_mask(core, cpu, i);
@@ -401,15 +413,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- /*
- * For perf, we return last level cache shared map.
- * And for power savings, we return cpu_core_map
- */
- if (!(cpu_has(c, X86_FEATURE_AMD_DCM)))
- return cpu_core_mask(cpu);
- else
- return cpu_llc_shared_mask(cpu);
+ return cpu_llc_shared_mask(cpu);
}
static void impress_friends(void)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index dc4e910a7d96..36fd42091fa7 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -409,9 +409,10 @@ static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,
* arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
* @mm: the probed address space.
* @arch_uprobe: the probepoint information.
+ * @addr: virtual address at which to install the probepoint
* Return 0 on success or a -ve number on error.
*/
-int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
int ret;
struct insn insn;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 7515cf0e1805..5db36caf4289 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -139,6 +139,19 @@ static int addr_to_vsyscall_nr(unsigned long addr)
return nr;
}
+#ifdef CONFIG_SECCOMP
+static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
+{
+ if (!seccomp_mode(&tsk->seccomp))
+ return 0;
+ task_pt_regs(tsk)->orig_ax = syscall_nr;
+ task_pt_regs(tsk)->ax = syscall_nr;
+ return __secure_computing(syscall_nr);
+}
+#else
+#define vsyscall_seccomp(_tsk, _nr) 0
+#endif
+
static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
/*
@@ -174,6 +187,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
int vsyscall_nr;
int prev_sig_on_uaccess_error;
long ret;
+ int skip;
/*
* No point in checking CS -- the only way to get here is a user mode
@@ -205,9 +219,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
}
tsk = current;
- if (seccomp_mode(&tsk->seccomp))
- do_exit(SIGKILL);
-
/*
* With a real vsyscall, page faults cause SIGSEGV. We want to
* preserve that behavior to make writing exploits harder.
@@ -222,8 +233,13 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
* address 0".
*/
ret = -EFAULT;
+ skip = 0;
switch (vsyscall_nr) {
case 0:
+ skip = vsyscall_seccomp(tsk, __NR_gettimeofday);
+ if (skip)
+ break;
+
if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
!write_ok_or_segv(regs->si, sizeof(struct timezone)))
break;
@@ -234,6 +250,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
break;
case 1:
+ skip = vsyscall_seccomp(tsk, __NR_time);
+ if (skip)
+ break;
+
if (!write_ok_or_segv(regs->di, sizeof(time_t)))
break;
@@ -241,6 +261,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
break;
case 2:
+ skip = vsyscall_seccomp(tsk, __NR_getcpu);
+ if (skip)
+ break;
+
if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
!write_ok_or_segv(regs->si, sizeof(unsigned)))
break;
@@ -253,6 +277,12 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
+ if (skip) {
+ if ((long)regs->ax <= 0L) /* seccomp errno emulation */
+ goto do_ret;
+ goto done; /* seccomp trace/trap */
+ }
+
if (ret == -EFAULT) {
/* Bad news -- userspace fed a bad pointer to a vsyscall. */
warn_bad_vsyscall(KERN_INFO, regs,
@@ -271,10 +301,11 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
regs->ax = ret;
+do_ret:
/* Emulate a ret instruction. */
regs->ip = caller;
regs->sp += 8;
-
+done:
return true;
sigsegv:
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index be3cea4407ff..57e168e27b5b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3934,6 +3934,9 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
{
struct kvm_mmu_page *page;
+ if (list_empty(&kvm->arch.active_mmu_pages))
+ return;
+
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 2e88438ffd83..9b7ec1150ab0 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -80,10 +80,10 @@ static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
- if (idx < X86_PMC_IDX_FIXED)
+ if (idx < INTEL_PMC_IDX_FIXED)
return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
else
- return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED);
+ return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
}
void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
@@ -291,7 +291,7 @@ static void reprogram_idx(struct kvm_pmu *pmu, int idx)
if (pmc_is_gp(pmc))
reprogram_gp_counter(pmc, pmc->eventsel);
else {
- int fidx = idx - X86_PMC_IDX_FIXED;
+ int fidx = idx - INTEL_PMC_IDX_FIXED;
reprogram_fixed_counter(pmc,
fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
}
@@ -452,7 +452,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
return;
pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
- X86_PMC_MAX_GENERIC);
+ INTEL_PMC_MAX_GENERIC);
pmu->counter_bitmask[KVM_PMC_GP] =
((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
bitmap_len = (entry->eax >> 24) & 0xff;
@@ -462,13 +462,13 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
pmu->nr_arch_fixed_counters = 0;
} else {
pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
- X86_PMC_MAX_FIXED);
+ INTEL_PMC_MAX_FIXED);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
}
pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
- (((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED);
+ (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;
}
@@ -478,15 +478,15 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
memset(pmu, 0, sizeof(*pmu));
- for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+ for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
pmu->gp_counters[i].type = KVM_PMC_GP;
pmu->gp_counters[i].vcpu = vcpu;
pmu->gp_counters[i].idx = i;
}
- for (i = 0; i < X86_PMC_MAX_FIXED; i++) {
+ for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
pmu->fixed_counters[i].vcpu = vcpu;
- pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED;
+ pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
}
init_irq_work(&pmu->irq_work, trigger_pmi);
kvm_pmu_cpuid_update(vcpu);
@@ -498,13 +498,13 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
int i;
irq_work_sync(&pmu->irq_work);
- for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+ for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
struct kvm_pmc *pmc = &pmu->gp_counters[i];
stop_counter(pmc);
pmc->counter = pmc->eventsel = 0;
}
- for (i = 0; i < X86_PMC_MAX_FIXED; i++)
+ for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
stop_counter(&pmu->fixed_counters[i]);
pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 911d2641f14c..62d02e3c3ed6 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -710,16 +710,6 @@ TRACE_EVENT(kvm_skinit,
__entry->rip, __entry->slb)
);
-#define __print_insn(insn, ilen) ({ \
- int i; \
- const char *ret = p->buffer + p->len; \
- \
- for (i = 0; i < ilen; ++i) \
- trace_seq_printf(p, " %02x", insn[i]); \
- trace_seq_printf(p, "%c", 0); \
- ret; \
- })
-
#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D (1 << 2)
@@ -786,7 +776,7 @@ TRACE_EVENT(kvm_emulate_insn,
TP_printk("%x:%llx:%s (%s)%s",
__entry->csbase, __entry->rip,
- __print_insn(__entry->insn, __entry->len),
+ __print_hex(__entry->insn, __entry->len),
__print_symbolic(__entry->flags,
kvm_trace_symbol_emul_flags),
__entry->failed ? " failed" : ""
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 459b58a8a15c..25b7ae8d058a 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(csum_partial_copy_to_user);
* @src: source address
* @dst: destination address
* @len: number of bytes to be copied.
- * @isum: initial sum that is added into the result (32bit unfolded)
+ * @sum: initial sum that is added into the result (32bit unfolded)
*
* Returns an 32bit unfolded checksum of the buffer.
*/
diff --git a/arch/x86/lib/msr-reg-export.c b/arch/x86/lib/msr-reg-export.c
index a311cc59b65d..8d6ef78b5d01 100644
--- a/arch/x86/lib/msr-reg-export.c
+++ b/arch/x86/lib/msr-reg-export.c
@@ -1,5 +1,5 @@
#include <linux/module.h>
#include <asm/msr.h>
-EXPORT_SYMBOL(native_rdmsr_safe_regs);
-EXPORT_SYMBOL(native_wrmsr_safe_regs);
+EXPORT_SYMBOL(rdmsr_safe_regs);
+EXPORT_SYMBOL(wrmsr_safe_regs);
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index 69fa10623f21..f6d13eefad10 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -6,13 +6,13 @@
#ifdef CONFIG_X86_64
/*
- * int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
+ * int {rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
*
* reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi]
*
*/
.macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
CFI_STARTPROC
pushq_cfi %rbx
pushq_cfi %rbp
@@ -45,13 +45,13 @@ ENTRY(native_\op\()_safe_regs)
_ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
.endm
#else /* X86_32 */
.macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
CFI_STARTPROC
pushl_cfi %ebx
pushl_cfi %ebp
@@ -92,7 +92,7 @@ ENTRY(native_\op\()_safe_regs)
_ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
.endm
#endif
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index f61ee67ec00f..4f74d94c8d97 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <asm/word-at-a-time.h>
+#include <linux/sched.h>
/*
* best effort, GUP based copy_from_user() that is NMI-safe
@@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
void *map;
int ret;
+ if (__range_not_ok(from, n, TASK_SIZE))
+ return len;
+
do {
ret = __get_user_pages_fast(addr, 1, 0, &page);
if (!ret)
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 819137904428..5d7e51f3fd28 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -28,7 +28,7 @@
# - (66): the last prefix is 0x66
# - (F3): the last prefix is 0xF3
# - (F2): the last prefix is 0xF2
-#
+# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
Table: one byte opcode
Referrer:
@@ -515,12 +515,12 @@ b4: LFS Gv,Mp
b5: LGS Gv,Mp
b6: MOVZX Gv,Eb
b7: MOVZX Gv,Ew
-b8: JMPE | POPCNT Gv,Ev (F3)
+b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
b9: Grp10 (1A)
ba: Grp8 Ev,Ib (1A)
bb: BTC Ev,Gv
-bc: BSF Gv,Ev | TZCNT Gv,Ev (F3)
-bd: BSR Gv,Ev | LZCNT Gv,Ev (F3)
+bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
+bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
be: MOVSX Gv,Eb
bf: MOVSX Gv,Ew
# 0x0f 0xc0-0xcf
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 97141c26a13a..bc4e9d84157f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -62,7 +62,8 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
extra += PMD_SIZE;
#endif
/* The first 2/4M doesn't use large pages. */
- extra += mr->end - mr->start;
+ if (mr->start < PMD_SIZE)
+ extra += mr->end - mr->start;
ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
} else
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index be1ef574ce9a..78fe3f1ac49f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -180,7 +180,7 @@ err_free_memtype:
/**
* ioremap_nocache - map bus memory into CPU space
- * @offset: bus address of the memory
+ * @phys_addr: bus address of the memory
* @size: size of the resource to map
*
* ioremap_nocache performs a platform specific sequence of operations to
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(ioremap_nocache);
/**
* ioremap_wc - map memory into CPU space write combined
- * @offset: bus address of the memory
+ * @phys_addr: bus address of the memory
* @size: size of the resource to map
*
* This version of ioremap ensures that the memory is marked write combining.
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e1ebde315210..a718e0d23503 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -122,7 +122,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
/**
* clflush_cache_range - flush a cache range with clflush
- * @addr: virtual start address
+ * @vaddr: virtual start address
* @size: number of bytes to flush
*
* clflush is an unordered instruction which needs fencing with mfence
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 732af3a96183..4599c3e8bcb6 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -176,6 +176,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
return;
}
+ node_set(node, numa_nodes_parsed);
+
printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
node, pxm,
(unsigned long long) start, (unsigned long long) end - 1);
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 303f08637826..b2b94438ff05 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -312,7 +312,7 @@ static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
goto fail;
}
/* both registers must be reserved */
- if (num_counters == AMD64_NUM_COUNTERS_F15H) {
+ if (num_counters == AMD64_NUM_COUNTERS_CORE) {
msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
} else {
@@ -514,7 +514,7 @@ static int op_amd_init(struct oprofile_operations *ops)
ops->create_files = setup_ibs_files;
if (boot_cpu_data.x86 == 0x15) {
- num_counters = AMD64_NUM_COUNTERS_F15H;
+ num_counters = AMD64_NUM_COUNTERS_CORE;
} else {
num_counters = AMD64_NUM_COUNTERS;
}
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c
index 3c6e328483c7..028454f0c3a5 100644
--- a/arch/x86/platform/mrst/early_printk_mrst.c
+++ b/arch/x86/platform/mrst/early_printk_mrst.c
@@ -110,19 +110,16 @@ static struct kmsg_dumper dw_dumper;
static int dumper_registered;
static void dw_kmsg_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason,
- const char *s1, unsigned long l1,
- const char *s2, unsigned long l2)
+ enum kmsg_dump_reason reason)
{
- int i;
+ static char line[1024];
+ size_t len;
/* When run to this, we'd better re-init the HW */
mrst_early_console_init();
- for (i = 0; i < l1; i++)
- early_mrst_console.write(&early_mrst_console, s1 + i, 1);
- for (i = 0; i < l2; i++)
- early_mrst_console.write(&early_mrst_console, s2 + i, 1);
+ while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
+ early_mrst_console.write(&early_mrst_console, line, len);
}
/* Set the ratio rate to 115200, 8n1, IRQ disabled */
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index e31bcd8f2eee..fd41a9262d65 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -782,7 +782,7 @@ BLOCKING_NOTIFIER_HEAD(intel_scu_notifier);
EXPORT_SYMBOL_GPL(intel_scu_notifier);
/* Called by IPC driver */
-void intel_scu_devices_create(void)
+void __devinit intel_scu_devices_create(void)
{
int i;
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 3ae0e61abd23..59880afa851f 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1295,7 +1295,6 @@ static void __init enable_timeouts(void)
*/
mmr_image |= (1L << SOFTACK_MSHIFT);
if (is_uv2_hub()) {
- mmr_image &= ~(1L << UV2_LEG_SHFT);
mmr_image |= (1L << UV2_EXT_SHFT);
}
write_mmr_misc_control(pnode, mmr_image);
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index 5f6a5b6c3a15..ddcf39b1a18d 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -66,9 +66,10 @@ BEGIN {
rex_expr = "^REX(\\.[XRWB]+)*"
fpu_expr = "^ESC" # TODO
- lprefix1_expr = "\\(66\\)"
+ lprefix1_expr = "\\((66|!F3)\\)"
lprefix2_expr = "\\(F3\\)"
- lprefix3_expr = "\\(F2\\)"
+ lprefix3_expr = "\\((F2|!F3)\\)"
+ lprefix_expr = "\\((66|F2|F3)\\)"
max_lprefix = 4
# All opcodes starting with lower-case 'v' or with (v1) superscript
@@ -333,13 +334,16 @@ function convert_operands(count,opnd, i,j,imm,mod)
if (match(ext, lprefix1_expr)) {
lptable1[idx] = add_flags(lptable1[idx],flags)
variant = "INAT_VARIANT"
- } else if (match(ext, lprefix2_expr)) {
+ }
+ if (match(ext, lprefix2_expr)) {
lptable2[idx] = add_flags(lptable2[idx],flags)
variant = "INAT_VARIANT"
- } else if (match(ext, lprefix3_expr)) {
+ }
+ if (match(ext, lprefix3_expr)) {
lptable3[idx] = add_flags(lptable3[idx],flags)
variant = "INAT_VARIANT"
- } else {
+ }
+ if (!match(ext, lprefix_expr)){
table[idx] = add_flags(table[idx],flags)
}
}
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 416bd40c0eba..68d1dc91b37b 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -39,9 +39,9 @@
#undef __SYSCALL_I386
#define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym,
-typedef void (*sys_call_ptr_t)(void);
+typedef asmlinkage void (*sys_call_ptr_t)(void);
-extern void sys_ni_syscall(void);
+extern asmlinkage void sys_ni_syscall(void);
const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
/*
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 66e6d9359826..0faad646f5fd 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -205,9 +205,9 @@ void syscall32_cpu_init(void)
{
/* Load these always in case some future AMD CPU supports
SYSENTER from compat mode too. */
- checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
- checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
- checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+ wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+ wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
wrmsrl(MSR_CSTAR, ia32_cstar_target);
}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index e74df9548a02..ed7d54985d0c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -209,6 +209,9 @@ static void __init xen_banner(void)
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}
+#define CPUID_THERM_POWER_LEAF 6
+#define APERFMPERF_PRESENT 0
+
static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
@@ -242,6 +245,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
*dx = cpuid_leaf5_edx_val;
return;
+ case CPUID_THERM_POWER_LEAF:
+ /* Disabling APERFMPERF for kernel usage */
+ maskecx = ~(1 << APERFMPERF_PRESENT);
+ break;
+
case 0xb:
/* Suppress extended topology stuff */
maskebx = 0;
@@ -1116,9 +1124,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.wbinvd = native_wbinvd,
.read_msr = native_read_msr_safe,
- .rdmsr_regs = native_rdmsr_safe_regs,
.write_msr = xen_write_msr_safe,
- .wrmsr_regs = native_wrmsr_safe_regs,
.read_tsc = native_read_tsc,
.read_pmc = native_read_pmc,
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index ffd08c414e91..64effdc6da94 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -706,6 +706,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
+ int ret = 0;
pfn = page_to_pfn(page);
if (!PageHighMem(page)) {
@@ -741,6 +742,24 @@ int m2p_add_override(unsigned long mfn, struct page *page,
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
spin_unlock_irqrestore(&m2p_override_lock, flags);
+ /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
+ * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
+ * pfn so that the following mfn_to_pfn(mfn) calls will return the
+ * pfn from the m2p_override (the backend pfn) instead.
+ * We need to do this because the pages shared by the frontend
+ * (xen-blkfront) can be already locked (lock_page, called by
+ * do_read_cache_page); when the userspace backend tries to use them
+ * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
+ * do_blockdev_direct_IO is going to try to lock the same pages
+ * again resulting in a deadlock.
+ * As a side effect get_user_pages_fast might not be safe on the
+ * frontend pages while they are being shared with the backend,
+ * because mfn_to_pfn (that ends up being called by GUPF) will
+ * return the backend pfn rather than the frontend pfn. */
+ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+ if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+ set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
+
return 0;
}
EXPORT_SYMBOL_GPL(m2p_add_override);
@@ -752,6 +771,7 @@ int m2p_remove_override(struct page *page, bool clear_pte)
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
+ int ret = 0;
pfn = page_to_pfn(page);
mfn = get_phys_to_machine(pfn);
@@ -821,6 +841,22 @@ int m2p_remove_override(struct page *page, bool clear_pte)
} else
set_phys_to_machine(pfn, page->index);
+ /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
+ * somewhere in this domain, even before being added to the
+ * m2p_override (see comment above in m2p_add_override).
+ * If there are no other entries in the m2p_override corresponding
+ * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
+ * the original pfn (the one shared by the frontend): the backend
+ * cannot do any IO on this page anymore because it has been
+ * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
+ * the original pfn causes mfn_to_pfn(mfn) to return the frontend
+ * pfn again. */
+ mfn &= ~FOREIGN_FRAME_BIT;
+ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+ if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+ m2p_find_override(mfn) == NULL)
+ set_phys_to_machine(pfn, mfn);
+
return 0;
}
EXPORT_SYMBOL_GPL(m2p_remove_override);
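
Both p2m.c hunks hinge on the same invariant test, restated here as a hedged helper-style sketch (the helper name is made up): an mfn is "also mapped locally" when the m2p entry for it names a pfn whose p2m entry points back at that same mfn. On override setup that pfn is re-marked FOREIGN_FRAME(mfn) so mfn_to_pfn() resolves to the backend pfn; on teardown the marking is undone only once m2p_find_override(mfn) reports no remaining override.

    static bool mfn_also_mapped_locally(unsigned long mfn, unsigned long *pfn)
    {
            if (__get_user(*pfn, &machine_to_phys_mapping[mfn]))
                    return false;                     /* no m2p entry for mfn */
            return get_phys_to_machine(*pfn) == mfn;  /* p2m(m2p(mfn)) == mfn */
    }
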
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 3ebba0753d38..a4790bf22c59 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -371,7 +371,8 @@ char * __init xen_memory_setup(void)
populated = xen_populate_chunk(map, memmap.nr_entries,
max_pfn, &last_pfn, xen_released_pages);
- extra_pages += (xen_released_pages - populated);
+ xen_released_pages -= populated;
+ extra_pages += xen_released_pages;
if (last_pfn > max_pfn) {
max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
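
The setup.c change keeps xen_released_pages consistent with the pages actually still released after repopulation, instead of only folding the difference into extra_pages. A hedged worked example with illustrative numbers:

    unsigned long xen_released_pages = 1000;   /* pages given back earlier   */
    unsigned long populated          = 200;    /* of those, re-populated now */
    unsigned long extra_pages        = 0;

    xen_released_pages -= populated;           /* 800 genuinely released      */
    extra_pages        += xen_released_pages;  /* extra_pages grows by 800 and
                                                * later users of the counter
                                                * see 800, not a stale 1000  */
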
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 7608559de93a..f973754ddf90 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -68,8 +68,8 @@ endif
# Only build variant and/or platform if it includes a Makefile
-buildvar := $(shell test -a $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
-buildplf := $(shell test -a $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
+buildvar := $(shell test -e $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
+buildplf := $(shell test -e $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
# Find libgcc.a
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 9b306e550e3f..2c8d6a3d250a 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -277,7 +277,7 @@ void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
/* Don't leak any random bits. */
- memset(elfregs, 0, sizeof (elfregs));
+ memset(elfregs, 0, sizeof(*elfregs));
/* Note: PS.EXCM is not set while user task is running; its
* being set in regs->ps is for exception handling convenience.
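
The process.c fix is the classic sizeof-of-a-pointer pitfall: with a pointer argument, sizeof(p) is the size of the pointer itself (4 or 8 bytes), not of the structure it points to, so the old memset cleared almost nothing. A self-contained, hedged illustration of the bug class:

    #include <string.h>

    struct gregs { unsigned long a[64]; };

    static void clear_regs(struct gregs *p)
    {
            memset(p, 0, sizeof(p));    /* wrong: clears only 4 or 8 bytes   */
            memset(p, 0, sizeof(*p));   /* right: clears the whole structure */
    }
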
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 88ecea3facb4..ee2e2089483d 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -83,7 +83,6 @@ SECTIONS
_text = .;
_stext = .;
- _ftext = .;
.text :
{
@@ -112,7 +111,7 @@ SECTIONS
EXCEPTION_TABLE(16)
/* Data section */
- _fdata = .;
+ _sdata = .;
RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
_edata = .;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index ba150e5de2eb..db955179da2d 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -26,11 +26,7 @@
#include <asm/bootparam.h>
#include <asm/page.h>
-
-/* References to section boundaries */
-
-extern char _ftext, _etext, _fdata, _edata, _rodata_end;
-extern char __init_begin, __init_end;
+#include <asm/sections.h>
/*
* mem_reserve(start, end, must_exist)
@@ -197,9 +193,9 @@ void __init mem_init(void)
reservedpages++;
}
- codesize = (unsigned long) &_etext - (unsigned long) &_ftext;
- datasize = (unsigned long) &_edata - (unsigned long) &_fdata;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+ codesize = (unsigned long) _etext - (unsigned long) _stext;
+ datasize = (unsigned long) _edata - (unsigned long) _sdata;
+ initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
"%ldk data, %ldk init %ldk highmem)\n",
@@ -237,7 +233,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
void free_initmem(void)
{
- free_reserved_mem(&__init_begin, &__init_end);
- printk("Freeing unused kernel memory: %dk freed\n",
- (&__init_end - &__init_begin) >> 10);
+ free_reserved_mem(__init_begin, __init_end);
+ printk("Freeing unused kernel memory: %zuk freed\n",
+ (__init_end - __init_begin) >> 10);
}
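
The xtensa linker-script and mm/init.c hunks together drop the private _ftext/_fdata markers in favour of the generic _stext/_sdata names that <asm/sections.h> declares. A hedged sketch of the convention that header relies on: section boundaries are declared as char arrays, so the symbols are used by name (no extra '&') and their differences are byte counts.

    extern char _stext[], _etext[];
    extern char _sdata[], _edata[];
    extern char __init_begin[], __init_end[];

    unsigned long codesize = _etext - _stext;           /* bytes of kernel text */
    unsigned long initsize = __init_end - __init_begin; /* bytes of init memory */
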