-rw-r--r--  .clang-format | 25
-rw-r--r--  .gitignore | 4
-rw-r--r--  Documentation/admin-guide/acpi/fan_performance_states.rst | 4
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/arm/arm,scmi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/arm,scpi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/cpus.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/fsl.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/omap/mpu.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/psci.yaml | 36
-rw-r--r--  Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/anx6345.yaml | 10
-rw-r--r--  Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/simple-framebuffer.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/dma/ti/k3-udma.yaml | 14
-rw-r--r--  Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml | 14
-rw-r--r--  Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml | 14
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/goodix.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/leds/common.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/leds/register-bit-led.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/media/ti,cal.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/ti/emif.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/max77650.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/mfd/tps65910.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/mfd/twl-family.txt (renamed from Documentation/devicetree/bindings/mfd/twl-familly.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/mfd/zii,rave-sp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mmc/mmc-controller.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/fsl-fman.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,ipa.yaml | 198
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml | 53
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/nvmem/nvmem.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/power/domain-idle-state.txt | 33
-rw-r--r--  Documentation/devicetree/bindings/power/domain-idle-state.yaml | 64
-rw-r--r--  Documentation/devicetree/bindings/power/power-domain.yaml | 24
-rw-r--r--  Documentation/devicetree/bindings/power/power_domain.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/regulator/regulator.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/st,stm32-sai.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/spi/st,stm32-spi.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml | 2
-rw-r--r--  Documentation/driver-api/dmaengine/client.rst | 14
-rw-r--r--  Documentation/filesystems/debugfs.txt | 6
-rw-r--r--  Documentation/filesystems/porting.rst | 8
-rw-r--r--  Documentation/hwmon/adm1177.rst | 3
-rw-r--r--  Documentation/networking/devlink/devlink-region.rst | 3
-rw-r--r--  Documentation/networking/devlink/mlx5.rst | 6
-rw-r--r--  Documentation/networking/ethtool-netlink.rst | 272
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 9
-rw-r--r--  Documentation/networking/net_failover.rst | 6
-rw-r--r--  Documentation/networking/rds.txt | 2
-rw-r--r--  Documentation/networking/sfp-phylink.rst | 32
-rw-r--r--  Documentation/power/index.rst | 1
-rw-r--r--  MAINTAINERS | 49
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/Kconfig | 5
-rw-r--r--  arch/arm/boot/dts/am437x-idk-evm.dts | 4
-rw-r--r--  arch/arm/boot/dts/bcm2711-rpi-4-b.dts | 3
-rw-r--r--  arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts | 2
-rw-r--r--  arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts | 2
-rw-r--r--  arch/arm/boot/dts/dra7-evm.dts | 4
-rw-r--r--  arch/arm/boot/dts/dra7-l4.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/dra76x.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/dra7xx-clocks.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx7-colibri.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx7d.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/ls1021a.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/motorola-mapphone-common.dtsi | 13
-rw-r--r--  arch/arm/boot/dts/r8a7779.dtsi | 2
-rw-r--r--  arch/arm/configs/bcm2835_defconfig | 1
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 1
-rw-r--r--  arch/arm/configs/socfpga_defconfig | 1
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 3
-rw-r--r--  arch/arm/mach-imx/Makefile | 2
-rw-r--r--  arch/arm/mach-imx/common.h | 4
-rw-r--r--  arch/arm/mach-imx/resume-imx6.S | 24
-rw-r--r--  arch/arm/mach-imx/suspend-imx6.S | 14
-rw-r--r--  arch/arm/mach-meson/Kconfig | 1
-rw-r--r--  arch/arm/mach-omap2/Makefile | 2
-rw-r--r--  arch/arm/mach-omap2/io.c | 2
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts | 2
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8qxp-mek.dts | 5
-rw-r--r--  arch/arm64/boot/dts/intel/socfpga_agilex.dtsi | 6
-rw-r--r--  arch/arm64/configs/defconfig | 2
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 2
-rw-r--r--  arch/arm64/include/asm/cache.h | 2
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 2
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 10
-rw-r--r--  arch/arm64/include/asm/io.h | 4
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 48
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 32
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h | 7
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 3
-rw-r--r--  arch/arm64/include/asm/virt.h | 2
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 39
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c | 4
-rw-r--r--  arch/arm64/mm/context.c | 20
-rw-r--r--  arch/mips/boot/dts/ingenic/ci20.dts | 44
-rw-r--r--  arch/mips/kernel/setup.c | 3
-rw-r--r--  arch/powerpc/kernel/cputable.c | 4
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c | 12
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/powerpc/mm/mem.c | 2
-rw-r--r--  arch/riscv/Kconfig | 1
-rw-r--r--  arch/riscv/Kconfig.socs | 24
-rw-r--r--  arch/riscv/Makefile | 6
-rw-r--r--  arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts | 5
-rw-r--r--  arch/riscv/configs/defconfig | 17
-rw-r--r--  arch/riscv/configs/rv32_defconfig | 18
-rw-r--r--  arch/riscv/include/asm/syscall.h | 7
-rw-r--r--  arch/riscv/kernel/entry.S | 11
-rw-r--r--  arch/riscv/kernel/module.c | 16
-rw-r--r--  arch/riscv/kernel/ptrace.c | 11
-rw-r--r--  arch/riscv/mm/init.c | 2
-rw-r--r--  arch/s390/include/asm/pgtable.h | 6
-rw-r--r--  arch/s390/pci/pci.c | 4
-rw-r--r--  arch/um/drivers/vector_kern.c | 1
-rw-r--r--  arch/x86/Makefile | 5
-rw-r--r--  arch/x86/crypto/Makefile | 7
-rw-r--r--  arch/x86/include/asm/io_bitmap.h | 9
-rw-r--r--  arch/x86/include/asm/paravirt.h | 7
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 4
-rw-r--r--  arch/x86/kernel/cpu/common.c | 2
-rw-r--r--  arch/x86/kernel/kvm.c | 65
-rw-r--r--  arch/x86/kernel/paravirt.c | 5
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kvm/Kconfig | 13
-rw-r--r--  arch/x86/kvm/Makefile | 1
-rw-r--r--  arch/x86/kvm/svm.c | 5
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 17
-rw-r--r--  arch/x86/kvm/x86.c | 14
-rw-r--r--  arch/x86/mm/dump_pagetables.c | 7
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 151
-rw-r--r--  arch/x86/xen/enlighten_pv.c | 25
-rw-r--r--  block/bfq-cgroup.c | 9
-rw-r--r--  block/blk-core.c | 6
-rw-r--r--  block/blk-flush.c | 2
-rw-r--r--  block/blk-mq-sched.c | 22
-rw-r--r--  block/blk-mq-tag.c | 4
-rw-r--r--  block/blk-mq-tag.h | 4
-rw-r--r--  block/blk-mq.c | 28
-rw-r--r--  block/blk-mq.h | 5
-rw-r--r--  drivers/acpi/acpi_watchdog.c | 15
-rw-r--r--  drivers/android/binder.c | 9
-rw-r--r--  drivers/android/binder_internal.h | 2
-rw-r--r--  drivers/android/binderfs.c | 7
-rw-r--r--  drivers/atm/nicstar.c | 2
-rw-r--r--  drivers/auxdisplay/Kconfig | 16
-rw-r--r--  drivers/auxdisplay/charlcd.c | 2
-rw-r--r--  drivers/auxdisplay/img-ascii-lcd.c | 4
-rw-r--r--  drivers/base/core.c | 27
-rw-r--r--  drivers/base/platform.c | 25
-rw-r--r--  drivers/base/swnode.c | 14
-rw-r--r--  drivers/block/null_blk.h | 3
-rw-r--r--  drivers/block/null_blk_main.c | 2
-rw-r--r--  drivers/block/paride/pcd.c | 2
-rw-r--r--  drivers/block/virtio_blk.c | 17
-rw-r--r--  drivers/block/xen-blkfront.c | 80
-rw-r--r--  drivers/bus/ti-sysc.c | 4
-rw-r--r--  drivers/cdrom/gdrom.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_si_platform.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq.c | 12
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 11
-rw-r--r--  drivers/crypto/chelsio/Makefile | 3
-rw-r--r--  drivers/crypto/chelsio/chcr_common.h | 135
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 51
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 7
-rw-r--r--  drivers/crypto/chelsio/chcr_ktls.c | 2021
-rw-r--r--  drivers/crypto/chelsio/chcr_ktls.h | 98
-rw-r--r--  drivers/devfreq/devfreq.c | 4
-rw-r--r--  drivers/dma-buf/dma-buf.c | 1
-rw-r--r--  drivers/dma/coh901318.c | 4
-rw-r--r--  drivers/dma/idxd/cdev.c | 4
-rw-r--r--  drivers/dma/idxd/sysfs.c | 27
-rw-r--r--  drivers/dma/imx-sdma.c | 5
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 6
-rw-r--r--  drivers/dma/ti/k3-udma.c | 493
-rw-r--r--  drivers/edac/synopsys_edac.c | 22
-rw-r--r--  drivers/firmware/efi/efi.c | 4
-rw-r--r--  drivers/firmware/imx/imx-scu.c | 27
-rw-r--r--  drivers/firmware/imx/misc.c | 8
-rw-r--r--  drivers/firmware/imx/scu-pd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 98
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 69
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 114
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 3
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 184
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dma.c | 28
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_scaler.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 7
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 20
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 19
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 58
-rw-r--r--  drivers/gpu/drm/i915/i915_perf_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 59
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 69
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 5
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 30
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.c | 7
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 6
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.c | 104
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.h | 11
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_vi_layer.c | 66
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c | 5
-rw-r--r--  drivers/hid/hid-hyperv.c | 6
-rw-r--r--  drivers/hwmon/adt7462.c | 2
-rw-r--r--  drivers/hwmon/pmbus/xdpe12284.c | 54
-rw-r--r--  drivers/i2c/busses/i2c-altera.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-jz4780.c | 36
-rw-r--r--  drivers/ide/ide-gd.c | 2
-rw-r--r--  drivers/infiniband/core/cm.c | 1
-rw-r--r--  drivers/infiniband/core/cma.c | 15
-rw-r--r--  drivers/infiniband/core/core_priv.h | 14
-rw-r--r--  drivers/infiniband/core/iwcm.c | 4
-rw-r--r--  drivers/infiniband/core/nldev.c | 2
-rw-r--r--  drivers/infiniband/core/rw.c | 31
-rw-r--r--  drivers/infiniband/core/security.c | 14
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 24
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 9
-rw-r--r--  drivers/infiniband/core/verbs.c | 10
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 17
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_main.c | 6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 2
-rw-r--r--  drivers/interconnect/core.c | 9
-rw-r--r--  drivers/macintosh/therm_windtunnel.c | 52
-rw-r--r--  drivers/md/bcache/alloc.c | 18
-rw-r--r--  drivers/md/bcache/btree.c | 13
-rw-r--r--  drivers/md/dm-bio-record.h | 15
-rw-r--r--  drivers/md/dm-cache-target.c | 6
-rw-r--r--  drivers/md/dm-integrity.c | 84
-rw-r--r--  drivers/md/dm-mpath.c | 2
-rw-r--r--  drivers/md/dm-thin-metadata.c | 2
-rw-r--r--  drivers/md/dm-verity-target.c | 2
-rw-r--r--  drivers/md/dm-writecache.c | 16
-rw-r--r--  drivers/md/dm-zoned-target.c | 10
-rw-r--r--  drivers/md/dm.c | 22
-rw-r--r--  drivers/media/mc/mc-entity.c | 4
-rw-r--r--  drivers/media/platform/vicodec/codec-v4l2-fwht.c | 34
-rw-r--r--  drivers/media/usb/pulse8-cec/pulse8-cec.c | 12
-rw-r--r--  drivers/media/v4l2-core/v4l2-mem2mem.c | 4
-rw-r--r--  drivers/misc/altera-stapl/altera.c | 12
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c | 17
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/bareudp.c | 17
-rw-r--r--  drivers/net/bonding/bond_alb.c | 20
-rw-r--r--  drivers/net/can/dev.c | 1
-rw-r--r--  drivers/net/can/slcan.c | 7
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 26
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 3
-rw-r--r--  drivers/net/dsa/mt7530.c | 60
-rw-r--r--  drivers/net/dsa/mt7530.h | 7
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.c | 8
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 47
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 3
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 131
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 26
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 15
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 2
-rw-r--r--  drivers/net/ethernet/atheros/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 188
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 11
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 129
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 19
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 36
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 16
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 4
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h | 4
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c | 10
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 8
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 6
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_console.c | 12
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 6
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 9
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/common.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/version.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 59
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 32
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 20
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 28
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h | 62
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 2
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 3
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 4
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 15
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 14
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip.h | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 26
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 13
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 12
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 9
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 20
-rw-r--r--  drivers/net/ethernet/dnet.c | 1
-rw-r--r--  drivers/net/ethernet/dnet.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 5
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 5
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 3
-rw-r--r--  drivers/net/ethernet/fealnx.c | 20
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 130
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 70
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 33
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 9
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 18
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fman/Kconfig | 28
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 18
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 50
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 13
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 35
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 96
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 74
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 8
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 3
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 59
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h | 3
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 24
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 119
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 243
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 57
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 330
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 17
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 30
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 174
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 38
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 165
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 30
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 24
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 156
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 68
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 41
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/skge.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/sky2.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c | 218
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 1356
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 171
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 112
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 171
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1113
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 54
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 585
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c | 198
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rl.c | 130
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 249
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 110
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 10
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ace.c | 561
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ace.h | 26
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 163
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_flower.c | 262
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_tc.c | 22
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vcap.h | 403
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 24
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic.h | 3
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 27
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 48
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_if.h | 38
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 61
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 15
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 6
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.c | 20
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 10
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 36
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 13
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h | 3
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 73
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 32
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 36
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 3
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 38
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.c | 29
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.h | 6
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 96
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 27
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 9
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 28
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 30
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.h | 2
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac.h | 8
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 198
-rw-r--r--  drivers/net/fddi/skfp/drvfbi.c | 4
-rw-r--r--  drivers/net/fddi/skfp/h/skfbi.h | 5
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 41
-rw-r--r--  drivers/net/ipa/Kconfig | 19
-rw-r--r--  drivers/net/ipa/Makefile | 12
-rw-r--r--  drivers/net/ipa/gsi.c | 2055
-rw-r--r--  drivers/net/ipa/gsi.h | 257
-rw-r--r--  drivers/net/ipa/gsi_private.h | 118
-rw-r--r--  drivers/net/ipa/gsi_reg.h | 417
-rw-r--r--  drivers/net/ipa/gsi_trans.c | 786
-rw-r--r--  drivers/net/ipa/gsi_trans.h | 226
-rw-r--r--  drivers/net/ipa/ipa.h | 148
-rw-r--r--  drivers/net/ipa/ipa_clock.c | 313
-rw-r--r--  drivers/net/ipa/ipa_clock.h | 53
-rw-r--r--  drivers/net/ipa/ipa_cmd.c | 680
-rw-r--r--  drivers/net/ipa/ipa_cmd.h | 195
-rw-r--r--  drivers/net/ipa/ipa_data-sc7180.c | 307
-rw-r--r--  drivers/net/ipa/ipa_data-sdm845.c | 329
-rw-r--r--  drivers/net/ipa/ipa_data.h | 280
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 1707
-rw-r--r--  drivers/net/ipa/ipa_endpoint.h | 110
-rw-r--r--  drivers/net/ipa/ipa_gsi.c | 54
-rw-r--r--  drivers/net/ipa/ipa_gsi.h | 60
-rw-r--r--  drivers/net/ipa/ipa_interrupt.c | 253
-rw-r--r--  drivers/net/ipa/ipa_interrupt.h | 117
-rw-r--r--  drivers/net/ipa/ipa_main.c | 954
-rw-r--r--  drivers/net/ipa/ipa_mem.c | 314
-rw-r--r--  drivers/net/ipa/ipa_mem.h | 90
-rw-r--r--  drivers/net/ipa/ipa_modem.c | 383
-rw-r--r--  drivers/net/ipa/ipa_modem.h | 31
-rw-r--r--  drivers/net/ipa/ipa_qmi.c | 538
-rw-r--r--  drivers/net/ipa/ipa_qmi.h | 41
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.c | 663
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.h | 252
-rw-r--r--  drivers/net/ipa/ipa_reg.c | 38
-rw-r--r--  drivers/net/ipa/ipa_reg.h | 476
-rw-r--r--  drivers/net/ipa/ipa_smp2p.c | 335
-rw-r--r--  drivers/net/ipa/ipa_smp2p.h | 48
-rw-r--r--  drivers/net/ipa/ipa_table.c | 700
-rw-r--r--  drivers/net/ipa/ipa_table.h | 103
-rw-r--r--  drivers/net/ipa/ipa_uc.c | 211
-rw-r--r--  drivers/net/ipa/ipa_uc.h | 32
-rw-r--r--  drivers/net/ipa/ipa_version.h | 23
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 19
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 5
-rw-r--r--  drivers/net/macsec.c | 25
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/phy/Kconfig | 14
-rw-r--r--  drivers/net/phy/Makefile | 2
-rw-r--r--  drivers/net/phy/bcm63xx.c | 1
-rw-r--r--  drivers/net/phy/marvell10g.c | 177
-rw-r--r--  drivers/net/phy/mdio-ipq8064.c | 166
-rw-r--r--  drivers/net/phy/mdio-xpcs.c | 612
-rw-r--r--  drivers/net/phy/mscc.c | 10
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/phy/phy_device.c | 6
-rw-r--r--  drivers/net/phy/phylink.c | 13
-rw-r--r--  drivers/net/slip/slhc.c | 14
-rw-r--r--  drivers/net/slip/slip.c | 7
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/tun.c | 106
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 411
-rw-r--r--  drivers/net/usb/huawei_cdc_ncm.c | 8
-rw-r--r--  drivers/net/usb/r8152.c | 15
-rw-r--r--  drivers/net/veth.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 109
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 24
-rw-r--r--  drivers/net/wireless/ath/wil6210/ethtool.c | 1
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 1
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 3
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11ac.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11ac.h | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11h.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n.h | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n_aggr.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n_aggr.h | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cfg80211.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cfg80211.h | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cfp.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cmdevt.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/debugfs.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/decl.h | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/ethtool.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/fw.h | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/ie.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/init.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/ioctl.h | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/join.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/main.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/main.h | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/pcie.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/pcie.h | 6
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/scan.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sdio.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sdio.h | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_event.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_rx.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_tx.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/tdls.c | 9
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/txrx.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/uap_cmd.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/uap_event.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/usb.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/usb.h | 6
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/util.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/util.h | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/wmm.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/wmm.h | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/Makefile | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/agg-rx.c | 17
-rw-r--r--  drivers/net/wireless/mediatek/mt76/airtime.c | 326
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.c | 58
-rw-r--r--  drivers/net/wireless/mediatek/mt76/eeprom.c | 20
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mac80211.c | 392
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mcu.c | 12
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mmio.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76.h | 165
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/core.c | 5
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/dma.c | 21
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/init.c | 22
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/mac.c | 39
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/main.c | 25
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/mcu.c | 22
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h | 7
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/regs.h | 15
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/Kconfig | 11
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/Makefile | 7
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c | 120
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/dma.c | 178
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c | 38
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/init.c | 350
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 991
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mac.h | 77
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/main.c | 380
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mcu.c | 1051
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mcu.h | 126
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mmio.c | 115
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h | 203
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h | 56
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/pci.c | 98
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/regs.h | 163
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/soc.c | 77
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/trace.c | 12
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/init.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/main.c | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/pci.c | 9
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/phy.c | 32
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 31
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02.h | 13
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c | 91
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c | 14
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mac.c | 43
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mac.h | 5
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c | 10
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c | 31
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_phy.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_phy.h | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_regs.h | 12
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_trace.h | 46
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c | 7
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c | 61
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c | 34
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 28
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/Makefile | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/init.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c | 25
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/phy.c | 26
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c | 20
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c | 19
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/trace.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/trace.h | 54
-rw-r--r--  drivers/net/wireless/mediatek/mt76/tx.c | 85
-rw-r--r--  drivers/net/wireless/mediatek/mt76/usb.c | 455
-rw-r--r--  drivers/net/wireless/mediatek/mt76/util.c | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/util.h | 14
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 64
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/commands.c | 651
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/commands.h | 4
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.c | 47
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.h | 26
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/event.c | 67
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 2
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qlink.h | 329
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qlink_util.h | 45
-rw-r--r--  drivers/net/wireless/realtek/rtw88/bf.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtw88/coex.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw88/fw.c | 11
-rw-r--r--  drivers/net/wireless/realtek/rtw88/hci.h | 26
-rw-r--r--  drivers/net/wireless/realtek/rtw88/mac.c | 25
-rw-r--r--  drivers/net/wireless/realtek/rtw88/mac80211.c | 37
-rw-r--r--  drivers/net/wireless/realtek/rtw88/main.c | 19
-rw-r--r--  drivers/net/wireless/realtek/rtw88/main.h | 32
-rw-r--r--  drivers/net/wireless/realtek/rtw88/pci.c | 9
-rw-r--r--  drivers/net/wireless/realtek/rtw88/pci.h | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw88/phy.c | 10
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822b.c | 30
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822c.c | 50
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822c.h | 5
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_sdio.c | 9
-rw-r--r--  drivers/net/wireless/ti/wl1251/sdio.c | 32
-rw-r--r--  drivers/nvme/host/pci.c | 2
-rw-r--r--  drivers/of/of_mdio.c | 1
-rw-r--r--  drivers/pci/controller/pcie-brcmstb.c | 2
-rw-r--r--  drivers/pci/pci-bridge-emul.c | 14
-rw-r--r--  drivers/pci/pci.c | 57
-rw-r--r--  drivers/perf/arm_pmu_acpi.c | 7
-rw-r--r--  drivers/perf/fsl_imx8_ddr_perf.c | 10
-rw-r--r--  drivers/phy/allwinner/phy-sun50i-usb3.c | 2
-rw-r--r--  drivers/phy/broadcom/phy-brcm-sata.c | 148
-rw-r--r--  drivers/phy/motorola/phy-mapphone-mdm6600.c | 27
-rw-r--r--  drivers/phy/phy-core.c | 18
-rw-r--r--  drivers/phy/ti/phy-gmii-sel.c | 10
-rw-r--r--  drivers/pinctrl/cirrus/pinctrl-madera-core.c | 13
-rw-r--r--  drivers/pinctrl/core.c | 1
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-scu.c | 4
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-gxl.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-falcon.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 3
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c | 2
-rw-r--r--  drivers/ptp/Kconfig | 12
-rw-r--r--  drivers/ptp/Makefile | 1
-rw-r--r--  drivers/ptp/ptp_vmw.c | 144
-rw-r--r--  drivers/regulator/stm32-vrefbuf.c | 3
-rw-r--r--  drivers/remoteproc/Kconfig | 6
-rw-r--r--  drivers/remoteproc/Makefile | 1
-rw-r--r--  drivers/remoteproc/qcom_q6v5_ipa_notify.c | 85
-rw-r--r--  drivers/remoteproc/qcom_q6v5_mss.c | 38
-rw-r--r--  drivers/reset/Kconfig | 3
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 11
-rw-r--r--  drivers/s390/net/qeth_core.h | 4
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 181
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 9
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 30
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 1
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 9
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 18
-rw-r--r--  drivers/scsi/sd_zbc.c | 7
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  drivers/soc/imx/soc-imx-scu.c | 2
-rw-r--r--  drivers/spi/atmel-quadspi.c | 11
-rw-r--r--  drivers/spi/spi-bcm63xx-hsspi.c | 1
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c | 103
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 23
-rw-r--r--  drivers/spi/spi-qup.c | 11
-rw-r--r--  drivers/spi/spi-zynqmp-gqspi.c | 3
-rw-r--r--  drivers/spi/spi.c | 32
-rw-r--r--  drivers/spi/spidev.c | 5
-rw-r--r--  drivers/staging/media/hantro/hantro_drv.c | 4
-rw-r--r--  drivers/staging/qlge/qlge_ethtool.c | 2
-rw-r--r--  drivers/staging/speakup/selection.c | 2
-rw-r--r--  drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt | 2
-rw-r--r--  drivers/tee/amdtee/core.c | 48
-rw-r--r--  drivers/tty/serdev/core.c | 10
-rw-r--r--  drivers/tty/serial/8250/8250_exar.c | 33
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 41
-rw-r--r--  drivers/tty/serial/mvebu-uart.c | 2
-rw-r--r--  drivers/tty/vt/selection.c | 27
-rw-r--r--  drivers/tty/vt/vt.c | 2
-rw-r--r--  drivers/usb/cdns3/gadget.c | 19
-rw-r--r--  drivers/usb/core/hub.c | 8
-rw-r--r--  drivers/usb/core/port.c | 10
-rw-r--r--  drivers/usb/core/quirks.c | 3
-rw-r--r--  drivers/usb/dwc3/gadget.c | 9
-rw-r--r--  drivers/usb/misc/usb251xb.c | 20
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 6
-rw-r--r--  drivers/video/backlight/Kconfig | 7
-rw-r--r--  drivers/video/backlight/Makefile | 1
-rw-r--r--  drivers/video/backlight/led_bl.c | 260
-rw-r--r--  drivers/video/console/vgacon.c | 3
-rw-r--r--  drivers/virtio/virtio_balloon.c | 2
-rw-r--r--  drivers/virtio/virtio_ring.c | 4
-rw-r--r--  drivers/watchdog/wdat_wdt.c | 25
-rw-r--r--  drivers/xen/xen-pciback/pciback.h | 2
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.c | 4
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c | 10
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_backend.c | 5
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 9
-rw-r--r--  fs/btrfs/inode.c | 4
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 2
-rw-r--r--  fs/cifs/cifsfs.c | 2
-rw-r--r--  fs/cifs/cifsglob.h | 7
-rw-r--r--  fs/cifs/cifsproto.h | 5
-rw-r--r--  fs/cifs/cifssmb.c | 3
-rw-r--r--  fs/cifs/dir.c | 1
-rw-r--r--  fs/cifs/file.c | 19
-rw-r--r--  fs/cifs/inode.c | 16
-rw-r--r--  fs/cifs/smb1ops.c | 2
-rw-r--r--  fs/cifs/smb2inode.c | 4
-rw-r--r--  fs/cifs/smb2ops.c | 3
-rw-r--r--  fs/cifs/smb2pdu.c | 1
-rw-r--r--  fs/crypto/keysetup.c | 9
-rw-r--r--  fs/debugfs/file.c | 17
-rw-r--r--  fs/ext4/super.c | 6
-rw-r--r--  fs/fat/inode.c | 19
-rw-r--r--  fs/fcntl.c | 6
-rw-r--r--  fs/gfs2/inode.c | 2
-rw-r--r--  fs/io-wq.c | 77
-rw-r--r--  fs/io-wq.h | 16
-rw-r--r--  fs/io_uring.c | 157
-rw-r--r--  fs/jbd2/transaction.c | 8
-rw-r--r--  fs/locks.c | 14
-rw-r--r--  fs/open.c | 3
-rw-r--r--  fs/zonefs/Kconfig | 1
-rw-r--r--  fs/zonefs/super.c | 8
-rw-r--r--  include/acpi/actypes.h | 3
-rw-r--r--  include/crypto/curve25519.h | 6
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 4
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 5
-rw-r--r--  include/linux/bitfield.h | 14
-rw-r--r--  include/linux/blkdev.h | 3
-rw-r--r--  include/linux/blktrace_api.h | 18
-rw-r--r--  include/linux/bpf-cgroup.h | 2
-rw-r--r--  include/linux/bpf.h | 41
-rw-r--r--  include/linux/cgroup.h | 1
-rw-r--r--  include/linux/dccp.h | 2
-rw-r--r--  include/linux/debugfs.h | 13
-rw-r--r--  include/linux/device.h | 11
-rw-r--r--  include/linux/ethtool.h | 64
-rw-r--r--  include/linux/filter.h | 37
-rw-r--r--  include/linux/inet_diag.h | 45
-rw-r--r--  include/linux/kernel.h | 7
-rw-r--r--  include/linux/kvm_host.h | 2
-rw-r--r--  include/linux/mdio-xpcs.h | 41
-rw-r--r--  include/linux/mlx5/driver.h | 11
-rw-r--r--  include/linux/mlx5/eswitch.h | 38
-rw-r--r--  include/linux/mlx5/fs.h | 1
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 37
-rw-r--r--  include/linux/mlx5/mlx5_ifc_fpga.h | 2
-rw-r--r--  include/linux/mm.h | 4
-rw-r--r--  include/linux/netdevice.h | 11
-rw-r--r--  include/linux/netlink.h | 4
-rw-r--r--  include/linux/pci.h | 13
-rw-r--r--  include/linux/phy.h | 3
-rw-r--r--  include/linux/platform_data/spi-omap2-mcspi.h | 1
-rw-r--r--  include/linux/platform_device.h | 2
-rw-r--r--  include/linux/preempt.h | 30
-rw-r--r--  include/linux/remoteproc/qcom_q6v5_ipa_notify.h | 82
-rw-r--r--  include/linux/rhashtable.h | 2
-rw-r--r--  include/linux/skbuff.h | 12
-rw-r--r--  include/linux/stmmac.h | 1
-rw-r--r--  include/linux/usb/cdc_ncm.h | 15
-rw-r--r--  include/linux/workqueue.h | 16
-rw-r--r--  include/net/act_api.h | 4
-rw-r--r--  include/net/af_unix.h | 4
-rw-r--r--  include/net/bonding.h | 2
-rw-r--r--  include/net/bpf_sk_storage.h | 27
-rw-r--r--  include/net/dn_fib.h | 2
-rw-r--r--  include/net/dsa.h | 6
-rw-r--r--  include/net/fib_rules.h | 1
-rw-r--r--  include/net/flow_offload.h | 106
-rw-r--r--  include/net/inet_sock.h | 2
-rw-r--r--  include/net/ip6_fib.h | 2
-rw-r--r--  include/net/ip6_route.h | 2
-rw-r--r--  include/net/ip_fib.h | 4
-rw-r--r--  include/net/lwtunnel.h | 2
-rw-r--r--  include/net/mip6.h | 2
-rw-r--r--  include/net/mld.h | 6
-rw-r--r--  include/net/mpls_iptunnel.h | 2
-rw-r--r--  include/net/ndisc.h | 6
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 32
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/nexthop.h | 2
-rw-r--r--  include/net/pie.h | 31
-rw-r--r--  include/net/pkt_cls.h | 28
-rw-r--r--  include/net/pkt_sched.h | 2
-rw-r--r--  include/net/sch_generic.h | 3
-rw-r--r--  include/net/sctp/structs.h | 2
-rw-r--r--  include/net/sock_reuseport.h | 2
-rw-r--r--  include/net/tc_act/tc_ct.h | 19
-rw-r--r--  include/soc/mscc/ocelot.h | 92
-rw-r--r--  include/soc/mscc/ocelot_dev.h | 2
-rw-r--r--  include/soc/mscc/ocelot_vcap.h | 205
-rw-r--r--  include/sound/soc.h | 2
-rw-r--r--  include/uapi/linux/bpf.h | 2
-rw-r--r--  include/uapi/linux/devlink.h | 1
-rw-r--r--  include/uapi/linux/dm-ioctl.h | 4
-rw-r--r--  include/uapi/linux/ethtool_netlink.h | 82
-rw-r--r--  include/uapi/linux/if_arcnet.h | 6
-rw-r--r--  include/uapi/linux/in.h | 2
-rw-r--r--  include/uapi/linux/inet_diag.h | 5
-rw-r--r--  include/uapi/linux/net_dropmon.h | 4
-rw-r--r--  include/uapi/linux/pkt_cls.h | 22
-rw-r--r--  include/uapi/linux/sock_diag.h | 26
-rw-r--r--  include/uapi/linux/tcp.h | 1
-rw-r--r--  include/xen/interface/io/tpmif.h | 2
-rw-r--r--  include/xen/xenbus.h | 3
-rw-r--r--  kernel/bpf/bpf_struct_ops.c | 2
-rw-r--r--  kernel/bpf/hashtab.c | 174
-rw-r--r--  kernel/bpf/lpm_trie.c | 14
-rw-r--r--  kernel/bpf/percpu_freelist.c | 20
-rw-r--r--  kernel/bpf/stackmap.c | 18
-rw-r--r--  kernel/bpf/syscall.c | 42
-rw-r--r--  kernel/bpf/trampoline.c | 9
-rw-r--r--  kernel/bpf/verifier.c | 40
-rw-r--r--  kernel/cgroup/cgroup-v1.c | 3
-rw-r--r--  kernel/cgroup/cgroup.c | 43
-rw-r--r--  kernel/events/core.c | 2
-rw-r--r--  kernel/exit.c | 4
-rw-r--r--  kernel/fork.c | 2
-rw-r--r--  kernel/pid.c | 10
-rw-r--r--  kernel/power/snapshot.c | 2
-rw-r--r--  kernel/sched/fair.c | 2
-rw-r--r--  kernel/seccomp.c | 4
-rw-r--r--  kernel/trace/blktrace.c | 117
-rw-r--r--  kernel/trace/bpf_trace.c | 7
-rw-r--r--  kernel/trace/ftrace.c | 2
-rw-r--r--  kernel/trace/trace_uprobe.c | 11
-rw-r--r--  kernel/workqueue.c | 14
-rw-r--r--  lib/test_bpf.c | 4
-rw-r--r--  mm/huge_memory.c | 3
-rw-r--r--  mm/memcontrol.c | 14
-rw-r--r--  mm/memory.c | 35
-rw-r--r--  mm/memory_hotplug.c | 8
-rw-r--r--  mm/mprotect.c | 38
-rw-r--r--  mm/z3fold.c | 1
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 4
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 2
-rw-r--r--  net/batman-adv/main.h | 2
-rw-r--r--  net/batman-adv/translation-table.c | 8
-rw-r--r--  net/bpf/test_run.c | 8
-rw-r--r--  net/caif/caif_dev.c | 3
-rw-r--r--  net/core/bpf_sk_storage.c | 285
-rw-r--r--  net/core/datagram.c | 25
-rw-r--r--  net/core/dev.c | 3
-rw-r--r--  net/core/devlink.c | 37
-rw-r--r--  net/core/drop_monitor.c | 2
-rw-r--r--  net/core/flow_dissector.c | 4
-rw-r--r--  net/core/flow_offload.c | 7
-rw-r--r--  net/core/netclassid_cgroup.c | 47
-rw-r--r--  net/core/pktgen.c | 4
-rw-r--r--  net/core/skmsg.c | 8
-rw-r--r--  net/core/sock.c | 5
-rw-r--r--  net/dccp/ccid.h | 2
-rw-r--r--  net/dccp/diag.c | 9
-rw-r--r--  net/dsa/dsa_priv.h | 2
-rw-r--r--  net/dsa/port.c | 44
-rw-r--r--  net/dsa/slave.c | 72
-rw-r--r--  net/dsa/tag_ocelot.c | 3
-rw-r--r--  net/ethtool/Makefile | 3
-rw-r--r--  net/ethtool/bitset.c | 94
-rw-r--r--  net/ethtool/bitset.h | 4
-rw-r--r--  net/ethtool/channels.c | 227
-rw-r--r--  net/ethtool/common.c | 31
-rw-r--r--  net/ethtool/common.h | 3
-rw-r--r--  net/ethtool/debug.c | 6
-rw-r--r--  net/ethtool/features.c | 304
-rw-r--r--  net/ethtool/ioctl.c | 157
-rw-r--r--  net/ethtool/linkinfo.c | 6
-rw-r--r--  net/ethtool/linkmodes.c | 6
-rw-r--r--  net/ethtool/netlink.c | 99
-rw-r--r--  net/ethtool/netlink.h | 15
-rw-r--r--  net/ethtool/privflags.c | 209
-rw-r--r--  net/ethtool/rings.c | 200
-rw-r--r--  net/ethtool/wol.c | 5
-rw-r--r--  net/hsr/hsr_debugfs.c | 5
-rw-r--r--  net/hsr/hsr_device.c | 64
-rw-r--r--  net/hsr/hsr_device.h | 3
-rw-r--r--  net/hsr/hsr_framereg.c | 3
-rw-r--r--  net/hsr/hsr_main.c | 3
-rw-r--r--  net/hsr/hsr_main.h | 1
-rw-r--r--  net/hsr/hsr_netlink.c | 49
-rw-r--r--  net/hsr/hsr_slave.c | 63
-rw-r--r--  net/hsr/hsr_slave.h | 2
-rw-r--r--  net/ieee802154/nl_policy.c | 6
-rw-r--r--  net/ipv4/af_inet.c | 4
-rw-r--r--  net/ipv4/ah4.c | 2
-rw-r--r--  net/ipv4/arp.c | 2
-rw-r--r--  net/ipv4/devinet.c | 6
-rw-r--r--  net/ipv4/fib_semantics.c | 4
-rw-r--r--  net/ipv4/gre_demux.c | 12
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 56
-rw-r--r--  net/ipv4/inet_diag.c | 343
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/ipmr.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_log_ipv4.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_pptp.c | 4
-rw-r--r--  net/ipv4/nexthop.c | 2
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/raw_diag.c | 29
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 9
-rw-r--r--  net/ipv4/tcp.c | 5
-rw-r--r--  net/ipv4/tcp_diag.c | 8
-rw-r--r--  net/ipv4/tcp_input.c | 6
-rw-r--r--  net/ipv4/tcp_ipv4.c | 4
-rw-r--r--  net/ipv4/tcp_minisocks.c | 1
-rw-r--r--  net/ipv4/udp.c | 16
-rw-r--r--  net/ipv4/udp_diag.c | 46
-rw-r--r--  net/ipv6/addrconf.c | 57
-rw-r--r--  net/ipv6/ah6.c | 4
-rw-r--r--  net/ipv6/exthdrs.c | 2
-rw-r--r--  net/ipv6/icmp.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 8
-rw-r--r--  net/ipv6/ip6mr.c | 2
-rw-r--r--  net/ipv6/ndisc.c | 2
-rw-r--r--  net/ipv6/netfilter/nf_log_ipv6.c | 2
-rw-r--r--  net/ipv6/raw.c | 8
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  net/ipv6/seg6_iptunnel.c | 4
-rw-r--r--  net/ipv6/seg6_local.c | 2
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 21
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/kcm/kcmsock.c | 4
-rw-r--r--  net/l2tp/l2tp_core.h | 2
-rw-r--r--  net/mac80211/mesh_hwmp.c | 3
-rw-r--r--  net/mpls/internal.h | 4
-rw-r--r--  net/mptcp/options.c | 35
-rw-r--r--  net/mptcp/protocol.c | 46
-rw-r--r--  net/mptcp/protocol.h | 2
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 2
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 60
-rw-r--r--  net/netfilter/nf_flow_table_ip.c | 15
-rw-r--r--  net/netfilter/nf_flow_table_offload.c | 27
-rw-r--r--  net/netfilter/nf_synproxy_core.c | 2
-rw-r--r--  net/netfilter/nf_tables_api.c | 22
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 2
-rw-r--r--  net/netfilter/nft_chain_nat.c | 1
-rw-r--r--  net/netfilter/nft_payload.c | 1
-rw-r--r--  net/netfilter/nft_tunnel.c | 2
-rw-r--r--  net/netfilter/x_tables.c | 6
-rw-r--r--  net/netfilter/xt_recent.c | 2
-rw-r--r--  net/netlink/af_netlink.c | 3
-rw-r--r--  net/nfc/hci/core.c | 19
-rw-r--r--  net/nfc/netlink.c | 4
-rw-r--r--  net/openvswitch/datapath.c | 1
-rw-r--r--  net/packet/af_packet.c | 13
-rw-r--r--  net/qrtr/ns.c | 56
-rw-r--r--  net/qrtr/qrtr.c | 10
-rw-r--r--  net/qrtr/qrtr.h | 2
-rw-r--r--  net/sched/Kconfig | 2
-rw-r--r--  net/sched/act_api.c | 36
-rw-r--r--  net/sched/act_ct.c | 569
-rw-r--r--  net/sched/cls_api.c | 106
-rw-r--r--  net/sched/em_ipt.c | 2
-rw-r--r--  net/sched/em_nbyte.c | 2
-rw-r--r--  net/sched/sch_atm.c | 2
-rw-r--r--  net/sched/sch_fifo.c | 97
-rw-r--r--  net/sched/sch_fq.c | 1
-rw-r--r--  net/sched/sch_fq_pie.c | 1
-rw-r--r--  net/sched/sch_generic.c | 8
-rw-r--r--  net/sched/sch_ingress.c | 11
-rw-r--r--  net/sched/sch_netem.c | 2
-rw-r--r--  net/sched/sch_pie.c | 49
-rw-r--r--  net/sched/sch_taprio.c | 13
-rw-r--r--  net/sctp/diag.c | 15
-rw-r--r--  net/smc/smc_ib.c | 1
-rw-r--r--  net/tipc/netlink.c | 1
-rw-r--r--  net/unix/af_unix.c | 28
-rw-r--r--  net/wireless/nl80211.c | 5
-rw-r--r--  net/xdp/xsk_queue.h | 4
-rw-r--r--  net/xfrm/espintcp.c | 2
-rwxr-xr-x  scripts/bpf_helpers_doc.py | 2
-rwxr-xr-x [-rw-r--r--]  scripts/parse-maintainers.pl | 0
-rw-r--r--  sound/mips/sgio2audio.c | 6
-rw-r--r--  sound/pci/bt87x.c | 7
-rw-r--r--  sound/pci/hda/patch_realtek.c | 31
-rw-r--r--  sound/soc/codecs/Kconfig | 2
-rw-r--r--sound/soc/codecs/pcm512x.c8
-rw-r--r--sound/soc/codecs/rt1015.c3
-rw-r--r--sound/soc/codecs/tas2562.c5
-rw-r--r--sound/soc/intel/skylake/skl-debug.c32
-rw-r--r--sound/soc/intel/skylake/skl-ssp-clk.c4
-rw-r--r--sound/soc/meson/g12a-tohdmitx.c6
-rw-r--r--sound/soc/soc-component.c2
-rw-r--r--sound/soc/soc-compress.c2
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--sound/soc/soc-pcm.c18
-rw-r--r--sound/soc/soc-topology.c17
-rw-r--r--sound/soc/sof/ipc.c2
-rw-r--r--sound/soc/stm/stm32_sai_sub.c18
-rw-r--r--tools/arch/x86/include/asm/msr-index.h2
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-feature.rst19
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst3
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool6
-rw-r--r--tools/bpf/bpftool/feature.c283
-rw-r--r--tools/bpf/bpftool/main.h3
-rw-r--r--tools/bpf/bpftool/prog.c4
-rw-r--r--tools/perf/Documentation/perf-config.txt74
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c18
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c17
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/x86/util/intel-bts.c17
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c17
-rw-r--r--tools/perf/builtin-annotate.c4
-rw-r--r--tools/perf/builtin-probe.c6
-rw-r--r--tools/perf/builtin-report.c2
-rw-r--r--tools/perf/builtin-top.c4
-rw-r--r--tools/perf/include/bpf/pid_filter.h2
-rw-r--r--tools/perf/include/bpf/stdio.h2
-rw-r--r--tools/perf/include/bpf/unistd.h2
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh2
-rw-r--r--tools/perf/ui/browsers/annotate.c19
-rw-r--r--tools/perf/ui/gtk/annotate.c2
-rw-r--r--tools/perf/util/annotate.c194
-rw-r--r--tools/perf/util/annotate.h9
-rw-r--r--tools/perf/util/auxtrace.c22
-rw-r--r--tools/perf/util/auxtrace.h6
-rw-r--r--tools/perf/util/config.c12
-rw-r--r--tools/perf/util/config.h1
-rw-r--r--tools/perf/util/probe-file.c28
-rwxr-xr-xtools/testing/ktest/ktest.pl16
-rw-r--r--tools/testing/ktest/sample.conf22
-rw-r--r--tools/testing/selftests/.gitignore5
-rw-r--r--tools/testing/selftests/bpf/Makefile5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/select_reuseport.c30
-rw-r--r--tools/testing/selftests/bpf/test_bpftool.py178
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool.sh5
-rw-r--r--tools/testing/selftests/bpf/test_progs.c25
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh18
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_ets.sh14
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh6
-rw-r--r--tools/testing/selftests/lkdtm/.gitignore2
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile2
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh34
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh27
-rwxr-xr-xtools/testing/selftests/net/forwarding/sch_ets.sh9
-rw-r--r--tools/testing/selftests/net/forwarding/sch_ets_tests.sh10
-rw-r--r--tools/testing/selftests/net/forwarding/tc_common.sh32
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c45
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh24
-rw-r--r--tools/testing/selftests/net/reuseaddr_ports_exhausted.c162
-rwxr-xr-xtools/testing/selftests/net/reuseaddr_ports_exhausted.sh35
-rw-r--r--tools/testing/selftests/pidfd/.gitignore1
-rw-r--r--tools/testing/selftests/tc-testing/config7
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/basic.json220
-rw-r--r--virt/kvm/arm/arm.c2
-rw-r--r--virt/kvm/arm/trace.h1
1267 files changed, 41938 insertions, 10845 deletions
diff --git a/.clang-format b/.clang-format
index 196ca317bd1f..6ec5558b516b 100644
--- a/.clang-format
+++ b/.clang-format
@@ -86,6 +86,8 @@ ForEachMacros:
- 'bio_for_each_segment_all'
- 'bio_list_for_each'
- 'bip_for_each_vec'
+ - 'bitmap_for_each_clear_region'
+ - 'bitmap_for_each_set_region'
- 'blkg_for_each_descendant_post'
- 'blkg_for_each_descendant_pre'
- 'blk_queue_for_each_rl'
@@ -115,6 +117,7 @@ ForEachMacros:
- 'drm_client_for_each_connector_iter'
- 'drm_client_for_each_modeset'
- 'drm_connector_for_each_possible_encoder'
+ - 'drm_for_each_bridge_in_chain'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_encoder'
@@ -136,9 +139,10 @@ ForEachMacros:
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_bvec'
+ - 'for_each_card_auxs'
+ - 'for_each_card_auxs_safe'
- 'for_each_card_components'
- - 'for_each_card_links'
- - 'for_each_card_links_safe'
+ - 'for_each_card_pre_auxs'
- 'for_each_card_prelinks'
- 'for_each_card_rtds'
- 'for_each_card_rtds_safe'
@@ -166,6 +170,7 @@ ForEachMacros:
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
+ - 'for_each_efi_handle'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
@@ -190,6 +195,7 @@ ForEachMacros:
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
+ - 'for_each_member'
- 'for_each_memblock'
- 'for_each_memblock_type'
- 'for_each_memcg_cache_index'
@@ -200,9 +206,11 @@ ForEachMacros:
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
- 'for_each_net'
+ - 'for_each_net_continue_reverse'
- 'for_each_netdev'
- 'for_each_netdev_continue'
- 'for_each_netdev_continue_rcu'
+ - 'for_each_netdev_continue_reverse'
- 'for_each_netdev_feature'
- 'for_each_netdev_in_bond_rcu'
- 'for_each_netdev_rcu'
@@ -254,10 +262,10 @@ ForEachMacros:
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dai'
- 'for_each_rtd_codec_dai_rollback'
- - 'for_each_rtdcom'
- - 'for_each_rtdcom_safe'
+ - 'for_each_rtd_components'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
+ - 'for_each_set_clump8'
- 'for_each_sg'
- 'for_each_sg_dma_page'
- 'for_each_sg_page'
@@ -267,6 +275,7 @@ ForEachMacros:
- 'for_each_subelement_id'
- '__for_each_thread'
- 'for_each_thread'
+ - 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
- 'for_each_zone_zonelist_nodemask'
@@ -330,6 +339,7 @@ ForEachMacros:
- 'list_for_each'
- 'list_for_each_codec'
- 'list_for_each_codec_safe'
+ - 'list_for_each_continue'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
@@ -351,6 +361,7 @@ ForEachMacros:
- 'llist_for_each_entry'
- 'llist_for_each_entry_safe'
- 'llist_for_each_safe'
+ - 'mci_for_each_dimm'
- 'media_device_for_each_entity'
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
@@ -444,10 +455,16 @@ ForEachMacros:
- 'virtio_device_for_each_vq'
- 'xa_for_each'
- 'xa_for_each_marked'
+ - 'xa_for_each_range'
- 'xa_for_each_start'
- 'xas_for_each'
- 'xas_for_each_conflict'
- 'xas_for_each_marked'
+ - 'xbc_array_for_each_value'
+ - 'xbc_for_each_key_value'
+ - 'xbc_node_for_each_array_value'
+ - 'xbc_node_for_each_child'
+ - 'xbc_node_for_each_key_value'
- 'zorro_for_each_dev'
#IncludeBlocks: Preserve # Unknown to clang-format-5.0
diff --git a/.gitignore b/.gitignore
index 2763fce8766c..72ef86a5570d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -100,10 +100,6 @@ modules.order
/include/ksym/
/arch/*/include/generated/
-# Generated lkdtm tests
-/tools/testing/selftests/lkdtm/*.sh
-!/tools/testing/selftests/lkdtm/run.sh
-
# stgit generated dirs
patches-*
diff --git a/Documentation/admin-guide/acpi/fan_performance_states.rst b/Documentation/admin-guide/acpi/fan_performance_states.rst
index 21d233ca50d8..98fe5c333121 100644
--- a/Documentation/admin-guide/acpi/fan_performance_states.rst
+++ b/Documentation/admin-guide/acpi/fan_performance_states.rst
@@ -18,7 +18,7 @@ may look as follows::
$ ls -l /sys/bus/acpi/devices/INT3404:00/
total 0
-...
+ ...
-r--r--r-- 1 root root 4096 Dec 13 20:38 state0
-r--r--r-- 1 root root 4096 Dec 13 20:38 state1
-r--r--r-- 1 root root 4096 Dec 13 20:38 state10
@@ -38,7 +38,7 @@ where each of the "state*" files represents one performance state of the fan
and contains a colon-separated list of 5 integer numbers (fields) with the
following interpretation::
-control_percent:trip_point_index:speed_rpm:noise_level_mdb:power_mw
+ control_percent:trip_point_index:speed_rpm:noise_level_mdb:power_mw
* ``control_percent``: The percent value to be used to set the fan speed to a
specific level using the _FSL object (0-100).
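As a purely hypothetical illustration of the field format above (the values
are invented, not taken from any real platform), one state file might read:

    40:1:3200:28000:1250

i.e. 40% fan control at trip point index 1, 3200 RPM, 28000 mdB of noise,
and 1250 mW of power draw.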
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index dbc22d684627..c07815d230bc 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -136,6 +136,10 @@
dynamic table installation which will install SSDT
tables to /sys/firmware/acpi/tables/dynamic.
+ acpi_no_watchdog [HW,ACPI,WDT]
+ Ignore the ACPI-based watchdog interface (WDAT) and let
+ a native driver control the watchdog device instead.
+
acpi_rsdp= [ACPI,EFI,KEXEC]
Pass the RSDP address to the kernel, mostly used
on machines running EFI runtime service to boot the
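As a sketch of how the acpi_no_watchdog entry added above would be used
(the surrounding parameters are illustrative), the flag is simply appended
to the kernel command line:

    root=/dev/nvme0n1p2 ro quiet acpi_no_watchdog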
diff --git a/Documentation/devicetree/bindings/arm/arm,scmi.txt b/Documentation/devicetree/bindings/arm/arm,scmi.txt
index f493d69e6194..dc102c4e4a78 100644
--- a/Documentation/devicetree/bindings/arm/arm,scmi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scmi.txt
@@ -102,7 +102,7 @@ Required sub-node properties:
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
[2] Documentation/devicetree/bindings/power/power-domain.yaml
[3] Documentation/devicetree/bindings/thermal/thermal.txt
-[4] Documentation/devicetree/bindings/sram/sram.txt
+[4] Documentation/devicetree/bindings/sram/sram.yaml
[5] Documentation/devicetree/bindings/reset/reset.txt
Example:
diff --git a/Documentation/devicetree/bindings/arm/arm,scpi.txt b/Documentation/devicetree/bindings/arm/arm,scpi.txt
index 7b83ef43b418..dd04d9d9a1b8 100644
--- a/Documentation/devicetree/bindings/arm/arm,scpi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scpi.txt
@@ -109,7 +109,7 @@ Required properties:
[0] http://infocenter.arm.com/help/topic/com.arm.doc.dui0922b/index.html
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
[2] Documentation/devicetree/bindings/thermal/thermal.txt
-[3] Documentation/devicetree/bindings/sram/sram.txt
+[3] Documentation/devicetree/bindings/sram/sram.yaml
[4] Documentation/devicetree/bindings/power/power-domain.yaml
Example:
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt
index b82b6a0ae6f7..8c7a4908a849 100644
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt
@@ -62,7 +62,7 @@ Timer node:
Syscon reboot node:
-See Documentation/devicetree/bindings/power/reset/syscon-reboot.txt for the
+See Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml for the
detailed list of properties, the two values defined below are specific to the
BCM6328-style timer:
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 7a9c3ce2dbef..0d5b61056b10 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -216,7 +216,7 @@ properties:
$ref: '/schemas/types.yaml#/definitions/phandle-array'
description: |
List of phandles to idle state nodes supported
- by this cpu (see ./idle-states.txt).
+ by this cpu (see ./idle-states.yaml).
capacity-dmips-mhz:
$ref: '/schemas/types.yaml#/definitions/uint32'
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index a8e0b4a813ed..0e17e1f6fb80 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -160,7 +160,7 @@ properties:
items:
- enum:
- armadeus,imx6dl-apf6 # APF6 (Solo) SoM
- - armadeus,imx6dl-apf6dldev # APF6 (Solo) SoM on APF6Dev board
+ - armadeus,imx6dl-apf6dev # APF6 (Solo) SoM on APF6Dev board
- eckelmann,imx6dl-ci4x10
- emtrion,emcon-mx6 # emCON-MX6S or emCON-MX6DL SoM
- emtrion,emcon-mx6-avari # emCON-MX6S or emCON-MX6DL SoM on Avari Base
diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt b/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt
index 115c5be0bd0b..8defacc44dd5 100644
--- a/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt
+++ b/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt
@@ -1,7 +1,7 @@
* Hisilicon Hi3519 System Controller Block
This binding uses the following binding:
-Documentation/devicetree/bindings/mfd/syscon.txt
+Documentation/devicetree/bindings/mfd/syscon.yaml
Required properties:
- compatible: "hisilicon,hi3519-sysctrl".
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
index 06df04cc827a..6ce0b212ec6d 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
@@ -81,4 +81,4 @@ Example:
};
};
-[1]. Documentation/devicetree/bindings/arm/idle-states.txt
+[1]. Documentation/devicetree/bindings/arm/idle-states.yaml
diff --git a/Documentation/devicetree/bindings/arm/omap/mpu.txt b/Documentation/devicetree/bindings/arm/omap/mpu.txt
index f301e636fd52..e41490e6979c 100644
--- a/Documentation/devicetree/bindings/arm/omap/mpu.txt
+++ b/Documentation/devicetree/bindings/arm/omap/mpu.txt
@@ -17,7 +17,7 @@ am335x and am437x only:
- pm-sram: Phandles to ocmcram nodes to be used for power management.
First should be type 'protect-exec' for the driver to use to copy
and run PM functions, second should be regular pool to be used for
- data region for code. See Documentation/devicetree/bindings/sram/sram.txt
+ data region for code. See Documentation/devicetree/bindings/sram/sram.yaml
for more details.
Examples:
diff --git a/Documentation/devicetree/bindings/arm/psci.yaml b/Documentation/devicetree/bindings/arm/psci.yaml
index 8ef85420b2ab..5e66934455bb 100644
--- a/Documentation/devicetree/bindings/arm/psci.yaml
+++ b/Documentation/devicetree/bindings/arm/psci.yaml
@@ -100,13 +100,14 @@ properties:
bindings in [1]) must specify this property.
[1] Kernel documentation - ARM idle states bindings
- Documentation/devicetree/bindings/arm/idle-states.txt
-
- "#power-domain-cells":
- description:
- The number of cells in a PM domain specifier as per binding in [3].
- Must be 0 as to represent a single PM domain.
+ Documentation/devicetree/bindings/arm/idle-states.yaml
+patternProperties:
+ "^power-domain-":
+ allOf:
+ - $ref: "../power/power-domain.yaml#"
+ type: object
+ description: |
ARM systems can have multiple cores, sometimes in an hierarchical
arrangement. This often, but not always, maps directly to the processor
power topology of the system. Individual nodes in a topology have their
@@ -122,14 +123,8 @@ properties:
helps to implement support for OSI mode and OS implementations may choose
to mandate it.
- [3] Documentation/devicetree/bindings/power/power_domain.txt
- [4] Documentation/devicetree/bindings/power/domain-idle-state.txt
-
- power-domains:
- $ref: '/schemas/types.yaml#/definitions/phandle-array'
- description:
- List of phandles and PM domain specifiers, as defined by bindings of the
- PM domain provider.
+ [3] Documentation/devicetree/bindings/power/power-domain.yaml
+ [4] Documentation/devicetree/bindings/power/domain-idle-state.yaml
required:
- compatible
@@ -199,7 +194,7 @@ examples:
CPU0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0>;
enable-method = "psci";
power-domains = <&CPU_PD0>;
@@ -208,7 +203,7 @@ examples:
CPU1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x100>;
enable-method = "psci";
power-domains = <&CPU_PD1>;
@@ -224,6 +219,9 @@ examples:
exit-latency-us = <10>;
min-residency-us = <100>;
};
+ };
+
+ domain-idle-states {
CLUSTER_RET: cluster-retention {
compatible = "domain-idle-state";
@@ -247,19 +245,19 @@ examples:
compatible = "arm,psci-1.0";
method = "smc";
- CPU_PD0: cpu-pd0 {
+ CPU_PD0: power-domain-cpu0 {
#power-domain-cells = <0>;
domain-idle-states = <&CPU_PWRDN>;
power-domains = <&CLUSTER_PD>;
};
- CPU_PD1: cpu-pd1 {
+ CPU_PD1: power-domain-cpu1 {
#power-domain-cells = <0>;
domain-idle-states = <&CPU_PWRDN>;
power-domains = <&CLUSTER_PD>;
};
- CLUSTER_PD: cluster-pd {
+ CLUSTER_PD: power-domain-cluster {
#power-domain-cells = <0>;
domain-idle-states = <&CLUSTER_RET>, <&CLUSTER_PWRDN>;
};
diff --git a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
index 68917bb7c7e8..55f7938c4826 100644
--- a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
@@ -52,7 +52,7 @@ required:
examples:
- |
- mlahb: ahb {
+ mlahb: ahb@38000000 {
compatible = "st,mlahb", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
index 9fe11ceecdba..80973619342d 100644
--- a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
+++ b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
@@ -70,7 +70,6 @@ examples:
#size-cells = <0>;
pmic@3e3 {
- compatible = "...";
reg = <0x3e3>;
/* ... */
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml
index 69cfa4a3d562..c604822cda07 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml
@@ -40,7 +40,7 @@ additionalProperties: false
examples:
- |
- osc24M: clk@01c20050 {
+ osc24M: clk@1c20050 {
#clock-cells = <0>;
compatible = "allwinner,sun4i-a10-osc-clk";
reg = <0x01c20050 0x4>;
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml
index 07f38def7dc3..43963c3062c8 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml
@@ -41,7 +41,7 @@ additionalProperties: false
examples:
- |
- clk@0600005c {
+ clk@600005c {
#clock-cells = <0>;
compatible = "allwinner,sun9i-a80-gt-clk";
reg = <0x0600005c 0x4>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
index 17f87178f6b8..3647007f82ca 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
@@ -42,7 +42,7 @@ properties:
be part of GCC and hence the TSENS properties can also be part
of the GCC/clock-controller node.
For more details on the TSENS properties please refer
- Documentation/devicetree/bindings/thermal/qcom-tsens.txt
+ Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
nvmem-cell-names:
minItems: 1
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
index 5d5d39665119..6009324be967 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
@@ -49,11 +49,7 @@ examples:
resets = <&tcon_ch0_clk 0>;
port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- tve0_in_tcon0: endpoint@0 {
- reg = <0>;
+ tve0_in_tcon0: endpoint {
remote-endpoint = <&tcon0_out_tve0>;
};
};
diff --git a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
index 6d72b3d11fbc..c21103869923 100644
--- a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
@@ -79,21 +79,15 @@ examples:
#size-cells = <0>;
anx6345_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- anx6345_in_tcon0: endpoint@0 {
- reg = <0>;
+ anx6345_in_tcon0: endpoint {
remote-endpoint = <&tcon0_out_anx6345>;
};
};
anx6345_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- anx6345_out_panel: endpoint@0 {
- reg = <0>;
+ anx6345_out_panel: endpoint {
remote-endpoint = <&panel_in_edp>;
};
};
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
index 4ebcea7d0c63..a614644c9849 100644
--- a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
+++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
@@ -37,6 +37,8 @@ examples:
dsi@ff450000 {
#address-cells = <1>;
#size-cells = <0>;
+ reg = <0xff450000 0x1000>;
+
panel@0 {
compatible = "leadtek,ltk500hd1829";
reg = <0>;
diff --git a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
index 186e5e1c8fa3..22c91beb0541 100644
--- a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
+++ b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
@@ -37,6 +37,8 @@ examples:
dsi@ff450000 {
#address-cells = <1>;
#size-cells = <0>;
+ reg = <0xff450000 0x1000>;
+
panel@0 {
compatible = "xinpeng,xpp055c272";
reg = <0>;
diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
index 678776b6012a..1db608c9eef5 100644
--- a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
+++ b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
@@ -174,10 +174,6 @@ examples:
};
};
- soc@1c00000 {
- lcdc0: lcdc@1c0c000 {
- compatible = "allwinner,sun4i-a10-lcdc";
- };
- };
+ lcdc0: lcdc { };
...
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
index 7bf1bb444812..aac617acb64f 100644
--- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
+++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
@@ -37,7 +37,7 @@ Optional nodes:
supports a single port with a single endpoint.
- See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and
- Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting
+ Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt for connecting
tfp410 DVI encoder or lcd panel to lcdc
[1] There is an errata about AM335x color wiring. For 16-bit color mode
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
index 8b5c346f23f6..34780d7535b8 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
@@ -143,7 +143,7 @@ examples:
#size-cells = <2>;
dma-coherent;
dma-ranges;
- ranges;
+ ranges = <0x0 0x30800000 0x0 0x30800000 0x0 0x05000000>;
ti,sci-dev-id = <118>;
@@ -169,16 +169,4 @@ examples:
ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */
};
};
-
- mcasp0: mcasp@02B00000 {
- dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
- dma-names = "tx", "rx";
- };
-
- crypto: crypto@4E00000 {
- compatible = "ti,sa2ul-crypto";
-
- dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>;
- dma-names = "tx", "rx1", "rx2";
- };
};
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index 4ea6a8789699..e8b99adcb1bd 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -84,31 +84,31 @@ examples:
gpu_opp_table: opp_table0 {
compatible = "operating-points-v2";
- opp@533000000 {
+ opp-533000000 {
opp-hz = /bits/ 64 <533000000>;
opp-microvolt = <1250000>;
};
- opp@450000000 {
+ opp-450000000 {
opp-hz = /bits/ 64 <450000000>;
opp-microvolt = <1150000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <1125000>;
};
- opp@350000000 {
+ opp-350000000 {
opp-hz = /bits/ 64 <350000000>;
opp-microvolt = <1075000>;
};
- opp@266000000 {
+ opp-266000000 {
opp-hz = /bits/ 64 <266000000>;
opp-microvolt = <1025000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
opp-microvolt = <925000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <912500>;
};
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
index 36f59b3ade71..8d966f3ff3db 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
@@ -138,31 +138,31 @@ examples:
gpu_opp_table: opp_table0 {
compatible = "operating-points-v2";
- opp@533000000 {
+ opp-533000000 {
opp-hz = /bits/ 64 <533000000>;
opp-microvolt = <1250000>;
};
- opp@450000000 {
+ opp-450000000 {
opp-hz = /bits/ 64 <450000000>;
opp-microvolt = <1150000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <1125000>;
};
- opp@350000000 {
+ opp-350000000 {
opp-hz = /bits/ 64 <350000000>;
opp-microvolt = <1075000>;
};
- opp@266000000 {
+ opp-266000000 {
opp-hz = /bits/ 64 <266000000>;
opp-microvolt = <1025000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
opp-microvolt = <925000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <912500>;
};
diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
index f46de17c0878..cc3c8ea6a894 100644
--- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
@@ -123,7 +123,7 @@ examples:
samsung,syscon-phandle = <&pmu_system_controller>;
/* NTC thermistor is a hwmon device */
- ncp15wb473@0 {
+ ncp15wb473 {
compatible = "murata,ncp15wb473";
pullup-uv = <1800000>;
pullup-ohm = <47000>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
index d7c3262b2494..c99ed3934d7e 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
@@ -62,7 +62,7 @@ required:
examples:
- |
- i2c@00000000 {
+ i2c {
#address-cells = <1>;
#size-cells = <0>;
gt928@5d {
diff --git a/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt b/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt
index c864a46cddcf..f5021214edec 100644
--- a/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt
+++ b/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt
@@ -1,7 +1,7 @@
Texas Instruments TWL family (twl4030) pwrbutton module
This module is part of the TWL4030. For more details about the whole
-chip see Documentation/devicetree/bindings/mfd/twl-familly.txt.
+chip see Documentation/devicetree/bindings/mfd/twl-family.txt.
This module provides a simple power button event via an Interrupt.
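A minimal sketch of the node, assuming the usual TWL4030 child layout and
interrupt number (verify both against the parent chip's binding):

    twl {
        pwrbutton {
            compatible = "ti,twl4030-pwrbutton";
            interrupts = <8>;
        };
    };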
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
index d97d099b87e5..c60b994fe116 100644
--- a/Documentation/devicetree/bindings/leds/common.yaml
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -85,7 +85,7 @@ properties:
# LED will act as a back-light, controlled by the framebuffer system
- backlight
# LED will turn on (but for leds-gpio see "default-state" property in
- # Documentation/devicetree/bindings/leds/leds-gpio.txt)
+ # Documentation/devicetree/bindings/leds/leds-gpio.yaml)
- default-on
# LED "double" flashes at a load average based rate
- heartbeat
diff --git a/Documentation/devicetree/bindings/leds/register-bit-led.txt b/Documentation/devicetree/bindings/leds/register-bit-led.txt
index cf1ea403ba7a..c7af6f70a97b 100644
--- a/Documentation/devicetree/bindings/leds/register-bit-led.txt
+++ b/Documentation/devicetree/bindings/leds/register-bit-led.txt
@@ -5,7 +5,7 @@ where single bits in a certain register can turn on/off a
single LED. The register bit LEDs appear as children to the
syscon device, with the proper compatible string. For the
syscon bindings see:
-Documentation/devicetree/bindings/mfd/syscon.txt
+Documentation/devicetree/bindings/mfd/syscon.yaml
Each LED is represented as a sub-node of the syscon device. Each
node's name represents the name of the corresponding LED.
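A hedged sketch of one such sub-node under its syscon parent (the register
offset, mask, and label are illustrative placeholders):

    syscon: syscon@10000000 {
        compatible = "syscon", "simple-mfd";
        reg = <0x10000000 0x1000>;

        led@8,0 {
            compatible = "register-bit-led";
            offset = <0x08>;
            mask = <0x01>;
            label = "status:green";
            default-state = "off";
        };
    };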
diff --git a/Documentation/devicetree/bindings/media/ti,cal.yaml b/Documentation/devicetree/bindings/media/ti,cal.yaml
index 1ea784179536..5e066629287d 100644
--- a/Documentation/devicetree/bindings/media/ti,cal.yaml
+++ b/Documentation/devicetree/bindings/media/ti,cal.yaml
@@ -177,7 +177,7 @@ examples:
};
};
- i2c5: i2c@4807c000 {
+ i2c {
clock-frequency = <400000>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
index 44d71469c914..63f674ffeb4f 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
@@ -32,7 +32,7 @@ Required only for "ti,emif-am3352" and "ti,emif-am4372":
- sram : Phandles for generic sram driver nodes,
first should be type 'protect-exec' for the driver to use to copy
and run PM functions, second should be regular pool to be used for
- data region for code. See Documentation/devicetree/bindings/sram/sram.txt
+ data region for code. See Documentation/devicetree/bindings/sram/sram.yaml
for more details.
Optional properties:
diff --git a/Documentation/devicetree/bindings/mfd/max77650.yaml b/Documentation/devicetree/bindings/mfd/max77650.yaml
index 4a70f875a6eb..480385789394 100644
--- a/Documentation/devicetree/bindings/mfd/max77650.yaml
+++ b/Documentation/devicetree/bindings/mfd/max77650.yaml
@@ -97,14 +97,14 @@ examples:
regulators {
compatible = "maxim,max77650-regulator";
- max77650_ldo: regulator@0 {
+ max77650_ldo: regulator-ldo {
regulator-compatible = "ldo";
regulator-name = "max77650-ldo";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <2937500>;
};
- max77650_sbb0: regulator@1 {
+ max77650_sbb0: regulator-sbb0 {
regulator-compatible = "sbb0";
regulator-name = "max77650-sbb0";
regulator-min-microvolt = <800000>;
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
index 4f62143afd24..a5ced46bbde9 100644
--- a/Documentation/devicetree/bindings/mfd/tps65910.txt
+++ b/Documentation/devicetree/bindings/mfd/tps65910.txt
@@ -26,8 +26,8 @@ Required properties:
ldo6, ldo7, ldo8
- xxx-supply: Input voltage supply regulator.
- These entries are require if regulators are enabled for a device. Missing of these
- properties can cause the regulator registration fails.
+ These entries are required if regulators are enabled for a device. Missing these
+ properties can cause the regulator registration to fail.
If any of the input supplies is powered through a battery or an always-on
supply, then these properties are also required, with the proper node handle
of the always-on power supply.
diff --git a/Documentation/devicetree/bindings/mfd/twl-familly.txt b/Documentation/devicetree/bindings/mfd/twl-family.txt
index 56f244b5d8a4..56f244b5d8a4 100644
--- a/Documentation/devicetree/bindings/mfd/twl-familly.txt
+++ b/Documentation/devicetree/bindings/mfd/twl-family.txt
diff --git a/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt b/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt
index 088eff9ddb78..e0f901edc063 100644
--- a/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt
+++ b/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt
@@ -20,7 +20,7 @@ RAVE SP consists of the following sub-devices:
Device Description
------ -----------
rave-sp-wdt : Watchdog
-rave-sp-nvmem : Interface to onborad EEPROM
+rave-sp-nvmem : Interface to onboard EEPROM
rave-sp-backlight : Display backlight
rave-sp-hwmon : Interface to onboard hardware sensors
rave-sp-leds : Interface to onboard LEDs
diff --git a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
index bb7e896cb644..9134e9bcca56 100644
--- a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
+++ b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
@@ -26,7 +26,7 @@ For generic IOMMU bindings, see
Documentation/devicetree/bindings/iommu/iommu.txt.
For arm-smmu binding, see:
-Documentation/devicetree/bindings/iommu/arm,smmu.txt.
+Documentation/devicetree/bindings/iommu/arm,smmu.yaml.
Required properties:
diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
index 3c0df4016a12..8fded83c519a 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
@@ -370,6 +370,7 @@ examples:
mmc3: mmc@1c12000 {
#address-cells = <1>;
#size-cells = <0>;
+ reg = <0x1c12000 0x200>;
pinctrl-names = "default";
pinctrl-0 = <&mmc3_pins_a>;
vmmc-supply = <&reg_vmmc3>;
diff --git a/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt b/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
index f3893c4d3c6a..d2eada5044b2 100644
--- a/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
+++ b/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
@@ -27,7 +27,7 @@ Required properties of NAND chips:
- reg: shall contain the native Chip Select ids from 0 to max supported by
the cadence nand flash controller
-See Documentation/devicetree/bindings/mtd/nand.txt for more details on
+See Documentation/devicetree/bindings/mtd/nand-controller.yaml for more details on
generic bindings.
Example:
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index 48a7f916c5e4..88b57b0ca1f4 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
@@ -45,7 +45,7 @@ Optional properties:
switch queue
- resets: a single phandle and reset identifier pair. See
- Documentation/devicetree/binding/reset/reset.txt for details.
+ Documentation/devicetree/bindings/reset/reset.txt for details.
- reset-names: If the "reset" property is specified, this property should have
the value "switch" to denote the switch reset line.
diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt
index 250f8d8cdce4..c00fb0d22c7b 100644
--- a/Documentation/devicetree/bindings/net/fsl-fman.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fman.txt
@@ -110,6 +110,13 @@ PROPERTIES
Usage: required
Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt
+- fsl,erratum-a050385
+ Usage: optional
+ Value type: boolean
+ Definition: A boolean property. Indicates the presence of
+ erratum A050385, under which DMA transactions that are
+ split can result in a FMan lock.
+
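As a sketch of how the new flag is consumed (the node label is
illustrative), a board dts affected by the erratum simply sets the
boolean on its FMan node:

    &fman0 {
        fsl,erratum-a050385;
    };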
=============================================================================
FMan MURAM Node
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
new file mode 100644
index 000000000000..140f15245654
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -0,0 +1,198 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qcom,ipa.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm IP Accelerator (IPA)
+
+maintainers:
+ - Alex Elder <elder@kernel.org>
+
+description:
+ This binding describes the Qualcomm IPA. The IPA is capable of offloading
+ certain network processing tasks (e.g. filtering, routing, and NAT) from
+ the main processor.
+
+ The IPA sits between multiple independent "execution environments,"
+ including the Application Processor (AP) and the modem. The IPA presents
+ a Generic Software Interface (GSI) to each execution environment.
+ The GSI is an integral part of the IPA, but it is logically isolated
+ and has a distinct interrupt and a separately-defined address space.
+
+ See also soc/qcom/qcom,smp2p.txt and interconnect/interconnect.txt.
+
+ - |
+ -------- ---------
+ | | | |
+ | AP +<---. .----+ Modem |
+ | +--. | | .->+ |
+ | | | | | | | |
+ -------- | | | | ---------
+ v | v |
+ --+-+---+-+--
+ | GSI |
+ |-----------|
+ | |
+ | IPA |
+ | |
+ -------------
+
+properties:
+ compatible:
+ const: "qcom,sdm845-ipa"
+
+ reg:
+ items:
+ - description: IPA registers
+ - description: IPA shared memory
+ - description: GSI registers
+
+ reg-names:
+ items:
+ - const: ipa-reg
+ - const: ipa-shared
+ - const: gsi
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: core
+
+ interrupts:
+ items:
+ - description: IPA interrupt (hardware IRQ)
+ - description: GSI interrupt (hardware IRQ)
+ - description: Modem clock query interrupt (smp2p interrupt)
+ - description: Modem setup ready interrupt (smp2p interrupt)
+
+ interrupt-names:
+ items:
+ - const: ipa
+ - const: gsi
+ - const: ipa-clock-query
+ - const: ipa-setup-ready
+
+ interconnects:
+ items:
+ - description: Interconnect path between IPA and main memory
+ - description: Interconnect path between IPA and internal memory
+ - description: Interconnect path between IPA and the AP subsystem
+
+ interconnect-names:
+ items:
+ - const: memory
+ - const: imem
+ - const: config
+
+ qcom,smem-states:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: State bits used by the AP to signal the modem.
+ items:
+ - description: Whether the "ipa-clock-enabled" state bit is valid
+ - description: Whether the IPA clock is enabled (if valid)
+
+ qcom,smem-state-names:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/string-array
+ description: The names of the state bits used for SMP2P output
+ items:
+ - const: ipa-clock-enabled-valid
+ - const: ipa-clock-enabled
+
+ modem-init:
+ type: boolean
+ description:
+ If present, it indicates that the modem is responsible for
+ performing early IPA initialization, including loading and
+ validating firmware used by the GSI.
+
+ modem-remoteproc:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ This defines the phandle to the remoteproc node representing
+ the modem subsystem. This is required so the IPA driver can
+ receive and act on notifications of modem up/down events.
+
+ memory-region:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+ description:
+ If present, a phandle for a reserved memory area that holds
+ the firmware passed to Trust Zone for authentication. Required
+ when Trust Zone (not the modem) performs early initialization.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+ - interconnects
+ - qcom,smem-states
+ - modem-remoteproc
+
+oneOf:
+ - required:
+ - modem-init
+ - required:
+ - memory-region
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/interconnect/qcom,sdm845.h>
+
+ smp2p-mpss {
+ compatible = "qcom,smp2p";
+ ipa_smp2p_out: ipa-ap-to-modem {
+ qcom,entry-name = "ipa";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ ipa_smp2p_in: ipa-modem-to-ap {
+ qcom,entry-name = "ipa";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+ ipa@1e40000 {
+ compatible = "qcom,sdm845-ipa";
+
+ modem-init;
+ modem-remoteproc = <&mss_pil>;
+
+ reg = <0 0x1e40000 0 0x7000>,
+ <0 0x1e47000 0 0x2000>,
+ <0 0x1e04000 0 0x2c000>;
+ reg-names = "ipa-reg",
+ "ipa-shared",
+ "gsi";
+
+ interrupts-extended = <&intc 0 311 IRQ_TYPE_EDGE_RISING>,
+ <&intc 0 432 IRQ_TYPE_LEVEL_HIGH>,
+ <&ipa_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&ipa_smp2p_in 1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ipa",
+ "gsi",
+ "ipa-clock-query",
+ "ipa-setup-ready";
+
+ clocks = <&rpmhcc RPMH_IPA_CLK>;
+ clock-names = "core";
+
+ interconnects =
+ <&rsc_hlos MASTER_IPA &rsc_hlos SLAVE_EBI1>,
+ <&rsc_hlos MASTER_IPA &rsc_hlos SLAVE_IMEM>,
+ <&rsc_hlos MASTER_APPSS_PROC &rsc_hlos SLAVE_IPA_CFG>;
+ interconnect-names = "memory",
+ "imem",
+ "config";
+
+ qcom,smem-states = <&ipa_smp2p_out 0>,
+ <&ipa_smp2p_out 1>;
+ qcom,smem-state-names = "ipa-clock-enabled-valid",
+ "ipa-clock-enabled";
+ };
diff --git a/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml b/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml
new file mode 100644
index 000000000000..b9f90081046f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qcom,ipq8064-mdio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm ipq806x MDIO bus controller
+
+maintainers:
+ - Ansuel Smith <ansuelsmth@gmail.com>
+
+description:
+ The ipq806x SoC has a dedicated MDIO controller that is
+ used to communicate with the connected gmac PHY.
+
+allOf:
+ - $ref: "mdio.yaml#"
+
+properties:
+ compatible:
+ const: qcom,ipq8064-mdio
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - "#address-cells"
+ - "#size-cells"
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-ipq806x.h>
+
+ mdio0: mdio@37000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,ipq8064-mdio";
+ reg = <0x37000000 0x200000>;
+
+ clocks = <&gcc GMAC_CORE1_CLK>;
+
+ switch@10 {
+ compatible = "qca,qca8337";
+ /* ... */
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
index 7e675dafc256..3a76d8faaaed 100644
--- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
+++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
@@ -4,17 +4,27 @@ This node provides properties for configuring the MediaTek mt76xx wireless
device. The node is expected to be specified as a child node of the PCI
controller to which the wireless chip is connected.
-Alternatively, it can specify the wireless part of the MT7628/MT7688 SoC.
-For SoC, use the compatible string "mediatek,mt7628-wmac" and the following
-properties:
+Alternatively, it can specify the wireless part of the MT7628/MT7688 or
+MT7622 SoC. For SoC, use the following compatible strings:
+
+compatible:
+- "mediatek,mt7628-wmac" for MT7628/MT7688
+- "mediatek,mt7622-wmac" for MT7622
+properties:
- reg: Address and length of the register set for the device.
- interrupts: Main device interrupt
+MT7622 specific properties:
+- power-domains: phandle to the power domain that the WMAC is part of
+- mediatek,infracfg: phandle to the infrastructure bus fabric syscon node
+
Optional properties:
- ieee80211-freq-limit: See ieee80211.txt
- mediatek,mtd-eeprom: Specify a MTD partition + offset containing EEPROM data
+- big-endian: if the radio eeprom partition is written in big-endian, specify
+ this property
The MAC address can as well be set with corresponding optional properties
defined in net/ethernet.txt.
@@ -31,6 +41,7 @@ Optional nodes:
reg = <0x0000 0 0 0 0>;
ieee80211-freq-limit = <5000000 6000000>;
mediatek,mtd-eeprom = <&factory 0x8000>;
+ big-endian;
led {
led-sources = <2>;
@@ -50,3 +61,15 @@ wmac: wmac@10300000 {
mediatek,mtd-eeprom = <&factory 0x0000>;
};
+
+MT7622 example:
+
+wmac: wmac@18000000 {
+ compatible = "mediatek,mt7622-wmac";
+ reg = <0 0x18000000 0 0x100000>;
+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_LOW>;
+
+ mediatek,infracfg = <&infracfg>;
+
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
+};
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
index f38950560982..88fd28d15eac 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
@@ -9,11 +9,12 @@ Required properties:
- spi-max-frequency : Maximum SPI clocking speed of device in Hz
- interrupts : Should contain interrupt line
- vio-supply : phandle to regulator providing VIO
-- ti,power-gpio : GPIO connected to chip's PMEN pin
Optional properties:
- ti,wl1251-has-eeprom : boolean, the wl1251 has an eeprom connected, which
provides configuration data (calibration, MAC, ...)
+- ti,power-gpio : GPIO connected to chip's PMEN pin if operated in
+ SPI mode
- Please consult Documentation/devicetree/bindings/spi/spi-bus.txt
for optional SPI connection related properties,
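A minimal SPI-mode sketch combining the properties above (the controller
label, GPIO lines, and clock rate are placeholders):

    &spi1 {
        wlan@0 {
            compatible = "ti,wl1251";
            reg = <0>;
            spi-max-frequency = <48000000>;
            interrupt-parent = <&gpio2>;
            interrupts = <10 0>;
            vio-supply = <&vio_reg>;
            ti,power-gpio = <&gpio2 11 0>; /* PMEN, SPI mode only */
        };
    };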
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
index b43c6c65294e..65980224d550 100644
--- a/Documentation/devicetree/bindings/nvmem/nvmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
@@ -76,6 +76,8 @@ examples:
qfprom: eeprom@700000 {
#address-cells = <1>;
#size-cells = <1>;
+ reg = <0x00700000 0x100000>;
+
wp-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
/* ... */
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml
index 020ef9e4c411..94ac23687b7e 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml
@@ -86,7 +86,7 @@ examples:
#include <dt-bindings/clock/sun4i-a10-ccu.h>
#include <dt-bindings/reset/sun4i-a10-ccu.h>
- usbphy: phy@01c13400 {
+ usbphy: phy@1c13400 {
#phy-cells = <1>;
compatible = "allwinner,sun4i-a10-usb-phy";
reg = <0x01c13400 0x10>, <0x01c14800 0x4>, <0x01c1c800 0x4>;
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
index bb690e20c368..135c7dfbc180 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
@@ -17,7 +17,7 @@ description: |+
"aspeed,ast2400-scu", "syscon", "simple-mfd"
Refer to the bindings described in
- Documentation/devicetree/bindings/mfd/syscon.txt
+ Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
index f7f5d57f2c9a..824f7fd1d51b 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
@@ -18,7 +18,7 @@ description: |+
"aspeed,g5-scu", "syscon", "simple-mfd"
Refer to the bindings described in
- Documentation/devicetree/bindings/mfd/syscon.txt
+ Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
index 3749fa233e87..ac8d1c30a8ed 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
@@ -17,7 +17,7 @@ description: |+
"aspeed,ast2600-scu", "syscon", "simple-mfd"
Refer to the bindings described in
- Documentation/devicetree/bindings/mfd/syscon.txt
+ Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index 754ea7ab040a..ef4de32cb17c 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -248,7 +248,7 @@ examples:
};
//Example 3 pin groups
- pinctrl@60020000 {
+ pinctrl {
usart1_pins_a: usart1-0 {
pins1 {
pinmux = <STM32_PINMUX('A', 9, AF7)>;
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
index aab70e8b681e..d3098c924b25 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
@@ -18,7 +18,7 @@ description: |+
"amlogic,meson-gx-hhi-sysctrl", "simple-mfd", "syscon"
Refer to the bindings described in
- Documentation/devicetree/bindings/mfd/syscon.txt
+ Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/power/domain-idle-state.txt b/Documentation/devicetree/bindings/power/domain-idle-state.txt
deleted file mode 100644
index eefc7ed22ca2..000000000000
--- a/Documentation/devicetree/bindings/power/domain-idle-state.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-PM Domain Idle State Node:
-
-A domain idle state node represents the state parameters that will be used to
-select the state when there are no active components in the domain.
-
-The state node has the following parameters -
-
-- compatible:
- Usage: Required
- Value type: <string>
- Definition: Must be "domain-idle-state".
-
-- entry-latency-us
- Usage: Required
- Value type: <prop-encoded-array>
- Definition: u32 value representing worst case latency in
- microseconds required to enter the idle state.
- The exit-latency-us duration may be guaranteed
- only after entry-latency-us has passed.
-
-- exit-latency-us
- Usage: Required
- Value type: <prop-encoded-array>
- Definition: u32 value representing worst case latency
- in microseconds required to exit the idle state.
-
-- min-residency-us
- Usage: Required
- Value type: <prop-encoded-array>
- Definition: u32 value representing minimum residency duration
- in microseconds after which the idle state will yield
- power benefits after overcoming the overhead in entering
-i the idle state.
diff --git a/Documentation/devicetree/bindings/power/domain-idle-state.yaml b/Documentation/devicetree/bindings/power/domain-idle-state.yaml
new file mode 100644
index 000000000000..dfba1af9abe5
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/domain-idle-state.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/domain-idle-state.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PM Domain Idle States binding description
+
+maintainers:
+ - Ulf Hansson <ulf.hansson@linaro.org>
+
+description:
+ A domain idle state node represents the state parameters that will be used to
+ select the state when there are no active components in the PM domain.
+
+properties:
+ $nodename:
+ const: domain-idle-states
+
+patternProperties:
+ "^(cpu|cluster|domain)-":
+ type: object
+ description:
+ Each state node represents a domain idle state description.
+
+ properties:
+ compatible:
+ const: domain-idle-state
+
+ entry-latency-us:
+ description:
+ The worst case latency in microseconds required to enter the idle
+ state. Note that the exit-latency-us duration may be guaranteed only
+ after the entry-latency-us has passed.
+
+ exit-latency-us:
+ description:
+ The worst case latency in microseconds required to exit the idle
+ state.
+
+ min-residency-us:
+ description:
+ The minimum residency duration in microseconds after which the idle
+ state will yield power benefits, once the overhead of entering the
+ idle state has been overcome.
+
+ required:
+ - compatible
+ - entry-latency-us
+ - exit-latency-us
+ - min-residency-us
+
+examples:
+ - |
+
+ domain-idle-states {
+ domain_retention: domain-retention {
+ compatible = "domain-idle-state";
+ entry-latency-us = <20>;
+ exit-latency-us = <40>;
+ min-residency-us = <80>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/power/power-domain.yaml b/Documentation/devicetree/bindings/power/power-domain.yaml
index 455b573293ae..6047aacd7766 100644
--- a/Documentation/devicetree/bindings/power/power-domain.yaml
+++ b/Documentation/devicetree/bindings/power/power-domain.yaml
@@ -25,22 +25,20 @@ description: |+
properties:
$nodename:
- pattern: "^(power-controller|power-domain)(@.*)?$"
+ pattern: "^(power-controller|power-domain)([@-].*)?$"
domain-idle-states:
$ref: /schemas/types.yaml#/definitions/phandle-array
- description:
- A phandle of an idle-state that shall be soaked into a generic domain
- power state. The idle state definitions are compatible with
- domain-idle-state specified in
- Documentation/devicetree/bindings/power/domain-idle-state.txt
- phandles that are not compatible with domain-idle-state will be ignored.
- The domain-idle-state property reflects the idle state of this PM domain
- and not the idle states of the devices or sub-domains in the PM domain.
- Devices and sub-domains have their own idle-states independent
- of the parent domain's idle states. In the absence of this property,
- the domain would be considered as capable of being powered-on
- or powered-off.
+ description: |
+ Phandles of idle states that defines the available states for the
+ power-domain provider. The idle state definitions are compatible with the
+ domain-idle-state bindings, specified in ./domain-idle-state.yaml.
+
+ Note that the domain-idle-state property reflects the idle states of this
+ PM domain and not the idle states of the devices or sub-domains in the PM
+ domain. Devices and sub-domains have their own idle states independent of
+ the parent domain's idle states. In the absence of this property, the
+ domain would be considered as capable of being powered-on or powered-off.
operating-points-v2:
$ref: /schemas/types.yaml#/definitions/phandle-array
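A hedged sketch tying the rewritten domain-idle-states property to a
provider node (names are illustrative; the referenced state node would
follow the domain-idle-state.yaml schema introduced earlier in this patch):

    power-domain {
        #power-domain-cells = <0>;
        domain-idle-states = <&domain_retention>;
    };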
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 5b09b2deb483..08497ef26c7a 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -109,4 +109,4 @@ Example:
required-opps = <&domain1_opp_1>;
};
-[1]. Documentation/devicetree/bindings/power/domain-idle-state.txt
+[1]. Documentation/devicetree/bindings/power/domain-idle-state.yaml
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
index f5cdac8b2847..8b005192f6e8 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
@@ -161,7 +161,7 @@ The regulator node houses sub-nodes for each regulator within the device. Each
sub-node is identified using the node's name, with valid values listed for each
of the PMICs below.
-pm8005:
+pm8004:
s2, s5
pm8005:
diff --git a/Documentation/devicetree/bindings/regulator/regulator.yaml b/Documentation/devicetree/bindings/regulator/regulator.yaml
index 92ff2e8ad572..91a39a33000b 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/regulator.yaml
@@ -191,7 +191,7 @@ patternProperties:
examples:
- |
- xyzreg: regulator@0 {
+ xyzreg: regulator {
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <2500000>;
regulator-always-on;
diff --git a/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml b/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml
index 246dea8a2ec9..8ac437282659 100644
--- a/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml
+++ b/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml
@@ -23,7 +23,11 @@ properties:
description: Global reset register offset and bit offset.
allOf:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- - maxItems: 2
+ items:
+ - description: Register offset
+ - description: Register bit offset
+ minimum: 0
+ maximum: 31
"#reset-cells":
minimum: 2
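As a sketch of the consumer side implied by the two cells above (the device
label and values are placeholders), a reset reference would carry the
register offset and bit offset:

    resets = <&rcu0 0x10 30>;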
diff --git a/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt b/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt
index b4edaf7c7ff3..2880d5dda95e 100644
--- a/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt
+++ b/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt
@@ -3,4 +3,4 @@ STMicroelectronics STM32MP1 Peripheral Reset Controller
The RCC IP is both a reset and a clock controller.
-Please see Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt
+Please see Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
index 944743dd9212..c42b91e525fa 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
@@ -36,7 +36,7 @@ SAI subnodes required properties:
- clock-names: Must contain "sai_ck".
Must also contain "MCLK", if SAI shares a master clock,
with a SAI set as MCLK clock provider.
- - dmas: see Documentation/devicetree/bindings/dma/stm32-dma.txt
+ - dmas: see Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
- dma-names: identifier string for each DMA request line
"tx": if sai sub-block is configured as playback DAI
"rx": if sai sub-block is configured as capture DAI
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt
index 33826f2459fa..ca9101777c44 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt
+++ b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt
@@ -10,7 +10,7 @@ Required properties:
- clock-names: must contain "kclk"
- interrupts: cpu DAI interrupt line
- dmas: DMA specifiers for audio data DMA and iec control flow DMA
- See STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt
+ See STM32 DMA bindings, Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
- dma-names: two dmas have to be defined, "rx" and "rx-ctrl"
Optional properties:
diff --git a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
index f0d979664f07..e49ecbf715ba 100644
--- a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
@@ -49,7 +49,7 @@ properties:
dmas:
description: |
DMA specifiers for tx and rx dma. DMA fifo mode must be used. See
- the STM32 DMA bindings Documentation/devicetree/bindings/dma/stm32-dma.txt.
+ the STM32 DMA bindings Documentation/devicetree/bindings/dma/st,stm32-dma.yaml.
items:
- description: rx DMA channel
- description: tx DMA channel
diff --git a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
index 80bac7a182d5..4b5509436588 100644
--- a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
+++ b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
@@ -125,7 +125,7 @@ examples:
#size-cells = <1>;
ranges;
- sram_a: sram@00000000 {
+ sram_a: sram@0 {
compatible = "mmio-sram";
reg = <0x00000000 0xc000>;
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml b/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
index d9fdf4809a49..f3e68ed03abf 100644
--- a/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
@@ -17,7 +17,7 @@ description: |+
"brcm,bcm2711-avs-monitor", "syscon", "simple-mfd"
Refer to the bindings described in
- Documentation/devicetree/bindings/mfd/syscon.txt
+ Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
index 23e989e09766..d918cee100ac 100644
--- a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
@@ -87,7 +87,7 @@ additionalProperties: false
examples:
- |
- timer {
+ timer@1c20c00 {
compatible = "allwinner,sun4i-a10-timer";
reg = <0x01c20c00 0x400>;
interrupts = <22>,
diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
index e5953e7e4bf4..2104830a99ae 100644
--- a/Documentation/driver-api/dmaengine/client.rst
+++ b/Documentation/driver-api/dmaengine/client.rst
@@ -151,8 +151,8 @@ The details of these operations are:
Note that callbacks will always be invoked from the DMA
engines tasklet, never from interrupt context.
-Optional: per descriptor metadata
----------------------------------
+ **Optional: per descriptor metadata**
+
DMAengine provides two ways for metadata support.
DESC_METADATA_CLIENT
@@ -199,12 +199,15 @@ Optional: per descriptor metadata
DESC_METADATA_CLIENT
- DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+
1. prepare the descriptor (dmaengine_prep_*)
construct the metadata in the client's buffer
2. use dmaengine_desc_attach_metadata() to attach the buffer to the
descriptor
3. submit the transfer
+
- DMA_DEV_TO_MEM:
+
1. prepare the descriptor (dmaengine_prep_*)
2. use dmaengine_desc_attach_metadata() to attach the buffer to the
descriptor
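
Taken together, the DESC_METADATA_CLIENT steps above amount to three calls
around the usual submit path. A minimal, hypothetical sketch for the
DMA_MEM_TO_DEV case (channel setup, buffer mapping and the metadata layout
are assumed, not shown)::

    /* Sketch: DESC_METADATA_CLIENT for a DMA_MEM_TO_DEV transfer. */
    #include <linux/dmaengine.h>

    static int foo_submit_with_metadata(struct dma_chan *chan, dma_addr_t buf,
                                        size_t len, void *md, size_t md_len)
    {
            struct dma_async_tx_descriptor *desc;
            dma_cookie_t cookie;
            int ret;

            /* 1. prepare the descriptor; md already holds the metadata */
            desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
                                               DMA_PREP_INTERRUPT);
            if (!desc)
                    return -ENOMEM;

            /* 2. attach the client's metadata buffer to the descriptor */
            ret = dmaengine_desc_attach_metadata(desc, md, md_len);
            if (ret)
                    return ret;

            /* 3. submit the transfer */
            cookie = dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return dma_submit_error(cookie);
    }

In DESC_METADATA_ENGINE mode the submission path is the same, but the buffer
is obtained with dmaengine_desc_get_metadata_ptr() and its payload length
reported back with dmaengine_desc_set_metadata_len(), as the steps in the
following hunks describe.
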
@@ -215,6 +218,7 @@ Optional: per descriptor metadata
DESC_METADATA_ENGINE
- DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+
1. prepare the descriptor (dmaengine_prep_*)
2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the
engine's metadata area
@@ -222,7 +226,9 @@ Optional: per descriptor metadata
4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the
amount of data the client has placed into the metadata buffer
5. submit the transfer
+
- DMA_DEV_TO_MEM:
+
1. prepare the descriptor (dmaengine_prep_*)
2. submit the transfer
3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get
@@ -278,8 +284,8 @@ Optional: per descriptor metadata
void dma_async_issue_pending(struct dma_chan *chan);
-Further APIs:
--------------
+Further APIs
+------------
1. Terminate APIs
diff --git a/Documentation/filesystems/debugfs.txt b/Documentation/filesystems/debugfs.txt
index dc497b96fa4f..55336a47a110 100644
--- a/Documentation/filesystems/debugfs.txt
+++ b/Documentation/filesystems/debugfs.txt
@@ -164,9 +164,9 @@ file.
void __iomem *base;
};
- struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
- struct dentry *parent,
- struct debugfs_regset32 *regset);
+ void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset);
void debugfs_print_regs32(struct seq_file *s, struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix);
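
With the void return, callers just fill in a struct debugfs_regset32 and call
the function without checking for errors. A hypothetical sketch (register
names and layout invented for illustration)::

    #include <linux/debugfs.h>

    static const struct debugfs_reg32 foo_regs[] = {
            { .name = "ctrl",   .offset = 0x00 },
            { .name = "status", .offset = 0x04 },
    };

    static struct debugfs_regset32 foo_regset;

    static void foo_debugfs_init(struct dentry *parent, void __iomem *base)
    {
            foo_regset.regs  = foo_regs;
            foo_regset.nregs = ARRAY_SIZE(foo_regs);
            foo_regset.base  = base;

            /* returns void now, so there is nothing to check */
            debugfs_create_regset32("registers", 0444, parent, &foo_regset);
    }
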
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index f18506083ced..26c093969573 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -850,3 +850,11 @@ business doing so.
d_alloc_pseudo() is internal-only; uses outside of alloc_file_pseudo() are
very suspect (and won't work in modules). Such uses are very likely to
be misspelled d_alloc_anon().
+
+---
+
+**mandatory**
+
+[should've been added in 2016] stale comment in finish_open() notwithstanding,
+failure exits in ->atomic_open() instances should *NOT* fput() the file,
+no matter what. Everything is handled by the caller.
diff --git a/Documentation/hwmon/adm1177.rst b/Documentation/hwmon/adm1177.rst
index c81e0b4abd28..471be1e98d6f 100644
--- a/Documentation/hwmon/adm1177.rst
+++ b/Documentation/hwmon/adm1177.rst
@@ -20,8 +20,7 @@ Usage Notes
-----------
This driver does not auto-detect devices. You will have to instantiate the
-devices explicitly. Please see Documentation/i2c/instantiating-devices for
-details.
+devices explicitly. Please see :doc:`/i2c/instantiating-devices` for details.
Sysfs entries
diff --git a/Documentation/networking/devlink/devlink-region.rst b/Documentation/networking/devlink/devlink-region.rst
index 1a7683e7acb2..8b46e8591fe0 100644
--- a/Documentation/networking/devlink/devlink-region.rst
+++ b/Documentation/networking/devlink/devlink-region.rst
@@ -40,9 +40,6 @@ example usage
# Delete a snapshot using:
$ devlink region del pci/0000:00:05.0/cr-space snapshot 1
- # Trigger (request) a snapshot be taken:
- $ devlink region trigger pci/0000:00:05.0/cr-space
-
# Dump a snapshot:
$ devlink region dump pci/0000:00:05.0/fw-health snapshot 1
0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
index 629a6e69c036..4e4b97f7971a 100644
--- a/Documentation/networking/devlink/mlx5.rst
+++ b/Documentation/networking/devlink/mlx5.rst
@@ -37,6 +37,12 @@ parameters.
* ``smfs`` Software managed flow steering. In SMFS mode, the HW
steering entities are created and managed through the driver without
firmware intervention.
+ * - ``fdb_large_groups``
+ - u32
+ - driverinit
+ - Control the number of large groups (size > 1) in the FDB table.
+
+ * The default value is 15, and the range is between 1 and 1024.
The ``mlx5`` driver supports reloading via ``DEVLINK_CMD_RELOAD``
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index f1f868479ceb..31a601cafa3f 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -189,6 +189,14 @@ Userspace to kernel:
``ETHTOOL_MSG_DEBUG_SET`` set debugging settings
``ETHTOOL_MSG_WOL_GET`` get wake-on-lan settings
``ETHTOOL_MSG_WOL_SET`` set wake-on-lan settings
+ ``ETHTOOL_MSG_FEATURES_GET`` get device features
+ ``ETHTOOL_MSG_FEATURES_SET`` set device features
+ ``ETHTOOL_MSG_PRIVFLAGS_GET`` get private flags
+ ``ETHTOOL_MSG_PRIVFLAGS_SET`` set private flags
+ ``ETHTOOL_MSG_RINGS_GET`` get ring sizes
+ ``ETHTOOL_MSG_RINGS_SET`` set ring sizes
+ ``ETHTOOL_MSG_CHANNELS_GET`` get channel counts
+ ``ETHTOOL_MSG_CHANNELS_SET`` set channel counts
===================================== ================================
Kernel to userspace:
@@ -204,6 +212,15 @@ Kernel to userspace:
``ETHTOOL_MSG_DEBUG_NTF`` debugging settings notification
``ETHTOOL_MSG_WOL_GET_REPLY`` wake-on-lan settings
``ETHTOOL_MSG_WOL_NTF`` wake-on-lan settings notification
+ ``ETHTOOL_MSG_FEATURES_GET_REPLY`` device features
+ ``ETHTOOL_MSG_FEATURES_SET_REPLY`` optional reply to FEATURES_SET
+ ``ETHTOOL_MSG_FEATURES_NTF`` netdev features notification
+ ``ETHTOOL_MSG_PRIVFLAGS_GET_REPLY`` private flags
+ ``ETHTOOL_MSG_PRIVFLAGS_NTF`` private flags
+ ``ETHTOOL_MSG_RINGS_GET_REPLY`` ring sizes
+ ``ETHTOOL_MSG_RINGS_NTF`` ring sizes
+ ``ETHTOOL_MSG_CHANNELS_GET_REPLY`` channel counts
+ ``ETHTOOL_MSG_CHANNELS_NTF`` channel counts
===================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -521,6 +538,213 @@ Request contents:
``WAKE_MAGICSECURE`` mode.
+FEATURES_GET
+============
+
+Gets netdev features like ``ETHTOOL_GFEATURES`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_FEATURES_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_FEATURES_HEADER`` nested reply header
+ ``ETHTOOL_A_FEATURES_HW`` bitset dev->hw_features
+ ``ETHTOOL_A_FEATURES_WANTED`` bitset dev->wanted_features
+ ``ETHTOOL_A_FEATURES_ACTIVE`` bitset dev->features
+ ``ETHTOOL_A_FEATURES_NOCHANGE`` bitset NETIF_F_NEVER_CHANGE
+ ==================================== ====== ==========================
+
+Bitmaps in kernel response have the same meaning as bitmaps used in ioctl
+interface, but attribute names are different (they are based on
+corresponding members of struct net_device). Legacy "flags" are not provided;
+if userspace needs them (most likely only ethtool for backward compatibility),
+it can calculate their values from related feature bits itself.
+``ETHTOOL_A_FEATURES_HW`` uses mask consisting of all features recognized by
+kernel (to provide all names when using verbose bitmap format), the other
+three use no mask (simple bit lists).
+
+
+FEATURES_SET
+============
+
+Request to set netdev features like ``ETHTOOL_SFEATURES`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_FEATURES_HEADER`` nested request header
+ ``ETHTOOL_A_FEATURES_WANTED`` bitset requested features
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_FEATURES_HEADER`` nested reply header
+ ``ETHTOOL_A_FEATURES_WANTED`` bitset diff wanted vs. result
+ ``ETHTOOL_A_FEATURES_ACTIVE`` bitset diff old vs. new active
+ ==================================== ====== ==========================
+
+Request contains only one bitset which can be either a value/mask pair (request
+to change specific feature bits and leave the rest) or only a value (request
+to set all features to specified set).
+
+As request is subject to netdev_change_features() sanity checks, optional
+kernel reply (can be suppressed by ``ETHTOOL_FLAG_OMIT_REPLY`` flag in request
+header) informs client about the actual result. ``ETHTOOL_A_FEATURES_WANTED``
+reports the difference between client request and actual result: mask consists
+of bits which differ between requested features and result (dev->features
+after the operation), value consists of values of these bits in the request
+(i.e. negated values from resulting features). ``ETHTOOL_A_FEATURES_ACTIVE``
+reports the difference between old and new dev->features: mask consists of
+bits which have changed, values are their values in new dev->features (after
+the operation).
+
+``ETHTOOL_MSG_FEATURES_NTF`` notification is sent not only if device features
+are modified using ``ETHTOOL_MSG_FEATURES_SET`` request or one of ethtool ioctl
+requests but also each time features are modified with netdev_update_features()
+or netdev_change_features().
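
The two reply bitsets are easiest to read as bit arithmetic. A sketch of the
computation described above, using our own variable names rather than the
kernel's internals::

    /* Sketch of the reported diffs; u64 from <linux/types.h>. */
    static void features_reply_diffs(u64 req_wanted, u64 old_active,
                                     u64 new_active,
                                     u64 *wanted_mask, u64 *wanted_val,
                                     u64 *active_mask, u64 *active_val)
    {
            /* ETHTOOL_A_FEATURES_WANTED: bits that did not end up as requested */
            *wanted_mask = req_wanted ^ new_active;
            *wanted_val  = req_wanted & *wanted_mask;

            /* ETHTOOL_A_FEATURES_ACTIVE: bits that actually changed */
            *active_mask = old_active ^ new_active;
            *active_val  = new_active & *active_mask;
    }
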
+
+
+PRIVFLAGS_GET
+=============
+
+Gets private flags like ``ETHTOOL_GPFLAGS`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_PRIVFLAGS_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_PRIVFLAGS_HEADER`` nested reply header
+ ``ETHTOOL_A_PRIVFLAGS_FLAGS`` bitset private flags
+ ==================================== ====== ==========================
+
+``ETHTOOL_A_PRIVFLAGS_FLAGS`` is a bitset with values of device private flags.
+These flags are defined by driver; their number and names (and also meaning)
+are device dependent. For compact bitset format, names can be retrieved as
+``ETH_SS_PRIV_FLAGS`` string set. If verbose bitset format is requested,
+response uses all private flags supported by the device as mask so that client
+gets the full information without having to fetch the string set with names.
+
+
+PRIVFLAGS_SET
+=============
+
+Sets or modifies values of device private flags like ``ETHTOOL_SPFLAGS``
+ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_PRIVFLAGS_HEADER`` nested request header
+ ``ETHTOOL_A_PRIVFLAGS_FLAGS`` bitset private flags
+ ==================================== ====== ==========================
+
+``ETHTOOL_A_PRIVFLAGS_FLAGS`` can either set the whole set of private flags or
+modify only values of some of them.
+
+
+RINGS_GET
+=========
+
+Gets ring sizes like ``ETHTOOL_GRINGPARAM`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_RINGS_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_RINGS_HEADER`` nested reply header
+ ``ETHTOOL_A_RINGS_RX_MAX`` u32 max size of RX ring
+ ``ETHTOOL_A_RINGS_RX_MINI_MAX`` u32 max size of RX mini ring
+ ``ETHTOOL_A_RINGS_RX_JUMBO_MAX`` u32 max size of RX jumbo ring
+ ``ETHTOOL_A_RINGS_TX_MAX`` u32 max size of TX ring
+ ``ETHTOOL_A_RINGS_RX`` u32 size of RX ring
+ ``ETHTOOL_A_RINGS_RX_MINI`` u32 size of RX mini ring
+ ``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring
+ ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring
+ ==================================== ====== ==========================
+
+
+RINGS_SET
+=========
+
+Sets ring sizes like ``ETHTOOL_SRINGPARAM`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_RINGS_HEADER`` nested request header
+ ``ETHTOOL_A_RINGS_RX`` u32 size of RX ring
+ ``ETHTOOL_A_RINGS_RX_MINI`` u32 size of RX mini ring
+ ``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring
+ ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring
+ ==================================== ====== ==========================
+
+Kernel checks that requested ring sizes do not exceed limits reported by
+driver. Driver may impose additional constraints and may not support all
+attributes.
+
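+
The check described above boils down to comparing each requested size against
the corresponding driver-reported maximum; a sketch using the field names of
the ioctl's struct ethtool_ringparam (the netlink code's internals may
differ). CHANNELS_SET below is validated the same way::

    static int rings_within_limits(const struct ethtool_ringparam *req,
                                   const struct ethtool_ringparam *max)
    {
            if (req->rx_pending       > max->rx_max_pending ||
                req->rx_mini_pending  > max->rx_mini_max_pending ||
                req->rx_jumbo_pending > max->rx_jumbo_max_pending ||
                req->tx_pending       > max->tx_max_pending)
                    return -EINVAL; /* size exceeds driver-reported limit */
            return 0;
    }
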
+
+CHANNELS_GET
+============
+
+Gets channel counts like ``ETHTOOL_GCHANNELS`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_CHANNELS_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_CHANNELS_HEADER`` nested reply header
+ ``ETHTOOL_A_CHANNELS_RX_MAX`` u32 max receive channels
+ ``ETHTOOL_A_CHANNELS_TX_MAX`` u32 max transmit channels
+ ``ETHTOOL_A_CHANNELS_OTHER_MAX`` u32 max other channels
+ ``ETHTOOL_A_CHANNELS_COMBINED_MAX`` u32 max combined channels
+ ``ETHTOOL_A_CHANNELS_RX_COUNT`` u32 receive channel count
+ ``ETHTOOL_A_CHANNELS_TX_COUNT`` u32 transmit channel count
+ ``ETHTOOL_A_CHANNELS_OTHER_COUNT`` u32 other channel count
+ ``ETHTOOL_A_CHANNELS_COMBINED_COUNT`` u32 combined channel count
+ ===================================== ====== ==========================
+
+
+CHANNELS_SET
+============
+
+Sets channel counts like ``ETHTOOL_SCHANNELS`` ioctl request.
+
+Request contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_CHANNELS_HEADER`` nested request header
+ ``ETHTOOL_A_CHANNELS_RX_COUNT`` u32 receive channel count
+ ``ETHTOOL_A_CHANNELS_TX_COUNT`` u32 transmit channel count
+ ``ETHTOOL_A_CHANNELS_OTHER_COUNT`` u32 other channel count
+ ``ETHTOOL_A_CHANNELS_COMBINED_COUNT`` u32 combined channel count
+ ===================================== ====== ==========================
+
+Kernel checks that requested channel counts do not exceed limits reported by
+driver. Driver may impose additional constraints and may not support all
+attributes.
+
+
Request translation
===================
@@ -547,35 +771,35 @@ have their netlink replacement yet.
``ETHTOOL_SEEPROM`` n/a
``ETHTOOL_GCOALESCE`` n/a
``ETHTOOL_SCOALESCE`` n/a
- ``ETHTOOL_GRINGPARAM`` n/a
- ``ETHTOOL_SRINGPARAM`` n/a
+ ``ETHTOOL_GRINGPARAM`` ``ETHTOOL_MSG_RINGS_GET``
+ ``ETHTOOL_SRINGPARAM`` ``ETHTOOL_MSG_RINGS_SET``
``ETHTOOL_GPAUSEPARAM`` n/a
``ETHTOOL_SPAUSEPARAM`` n/a
- ``ETHTOOL_GRXCSUM`` n/a
- ``ETHTOOL_SRXCSUM`` n/a
- ``ETHTOOL_GTXCSUM`` n/a
- ``ETHTOOL_STXCSUM`` n/a
- ``ETHTOOL_GSG`` n/a
- ``ETHTOOL_SSG`` n/a
+ ``ETHTOOL_GRXCSUM`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SRXCSUM`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GTXCSUM`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_STXCSUM`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GSG`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SSG`` ``ETHTOOL_MSG_FEATURES_SET``
``ETHTOOL_TEST`` n/a
``ETHTOOL_GSTRINGS`` ``ETHTOOL_MSG_STRSET_GET``
``ETHTOOL_PHYS_ID`` n/a
``ETHTOOL_GSTATS`` n/a
- ``ETHTOOL_GTSO`` n/a
- ``ETHTOOL_STSO`` n/a
+ ``ETHTOOL_GTSO`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_STSO`` ``ETHTOOL_MSG_FEATURES_SET``
``ETHTOOL_GPERMADDR`` rtnetlink ``RTM_GETLINK``
- ``ETHTOOL_GUFO`` n/a
- ``ETHTOOL_SUFO`` n/a
- ``ETHTOOL_GGSO`` n/a
- ``ETHTOOL_SGSO`` n/a
- ``ETHTOOL_GFLAGS`` n/a
- ``ETHTOOL_SFLAGS`` n/a
- ``ETHTOOL_GPFLAGS`` n/a
- ``ETHTOOL_SPFLAGS`` n/a
+ ``ETHTOOL_GUFO`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SUFO`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GGSO`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SGSO`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GFLAGS`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SFLAGS`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GPFLAGS`` ``ETHTOOL_MSG_PRIVFLAGS_GET``
+ ``ETHTOOL_SPFLAGS`` ``ETHTOOL_MSG_PRIVFLAGS_SET``
``ETHTOOL_GRXFH`` n/a
``ETHTOOL_SRXFH`` n/a
- ``ETHTOOL_GGRO`` n/a
- ``ETHTOOL_SGRO`` n/a
+ ``ETHTOOL_GGRO`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SGRO`` ``ETHTOOL_MSG_FEATURES_SET``
``ETHTOOL_GRXRINGS`` n/a
``ETHTOOL_GRXCLSRLCNT`` n/a
``ETHTOOL_GRXCLSRULE`` n/a
@@ -589,10 +813,10 @@ have their netlink replacement yet.
``ETHTOOL_GSSET_INFO`` ``ETHTOOL_MSG_STRSET_GET``
``ETHTOOL_GRXFHINDIR`` n/a
``ETHTOOL_SRXFHINDIR`` n/a
- ``ETHTOOL_GFEATURES`` n/a
- ``ETHTOOL_SFEATURES`` n/a
- ``ETHTOOL_GCHANNELS`` n/a
- ``ETHTOOL_SCHANNELS`` n/a
+ ``ETHTOOL_GFEATURES`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SFEATURES`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GCHANNELS`` ``ETHTOOL_MSG_CHANNELS_GET``
+ ``ETHTOOL_SCHANNELS`` ``ETHTOOL_MSG_CHANNELS_SET``
``ETHTOOL_SET_DUMP`` n/a
``ETHTOOL_GET_DUMP_FLAG`` n/a
``ETHTOOL_GET_DUMP_DATA`` n/a
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5f53faff4e25..ee961d322d93 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -958,6 +958,15 @@ ip_nonlocal_bind - BOOLEAN
which can be quite useful - but may break some applications.
Default: 0
+ip_autobind_reuse - BOOLEAN
+ By default, bind() does not select the ports automatically even if
+ the new socket and all sockets bound to the port have SO_REUSEADDR.
+ ip_autobind_reuse allows bind() to reuse the port and this is useful
+ when you use bind()+connect(), but may break some applications.
+ The preferred solution is to use IP_BIND_ADDRESS_NO_PORT and this
+ option should only be set by experts.
+ Default: 0
+
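
The preferred approach mentioned above, IP_BIND_ADDRESS_NO_PORT, defers
source-port selection from bind() to connect(). A minimal userspace sketch
(error handling trimmed; src is expected to carry sin_port == 0)::

    #include <netinet/in.h>
    #include <sys/socket.h>

    static int connect_from(const struct sockaddr_in *src,
                            const struct sockaddr_in *dst)
    {
            int one = 1;
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            if (fd < 0)
                    return -1;
            /* defer source-port selection from bind() to connect() */
            setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT,
                       &one, sizeof(one));
            if (bind(fd, (const struct sockaddr *)src, sizeof(*src)) < 0 ||
                connect(fd, (const struct sockaddr *)dst, sizeof(*dst)) < 0)
                    return -1;
            return fd;
    }
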
ip_dynaddr - BOOLEAN
If set non-zero, enables support for dynamic addresses.
If set to a non-zero value larger than 1, a kernel log
diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst
index 06c97dcb57ca..e143ab79a960 100644
--- a/Documentation/networking/net_failover.rst
+++ b/Documentation/networking/net_failover.rst
@@ -8,9 +8,9 @@ Overview
========
The net_failover driver provides an automated failover mechanism via APIs
-to create and destroy a failover master netdev and mananges a primary and
+to create and destroy a failover master netdev and manages a primary and
standby slave netdevs that get registered via the generic failover
-infrastructrure.
+infrastructure.
The failover netdev acts as a master device and controls 2 slave devices. The
original paravirtual interface is registered as 'standby' slave netdev and
@@ -29,7 +29,7 @@ virtio-net accelerated datapath: STANDBY mode
=============================================
net_failover enables hypervisor controlled accelerated datapath to virtio-net
-enabled VMs in a transparent manner with no/minimal guest userspace chanages.
+enabled VMs in a transparent manner with no/minimal guest userspace changes.
To support this, the hypervisor needs to enable VIRTIO_NET_F_STANDBY
feature on the virtio-net interface and assign the same MAC address to both
diff --git a/Documentation/networking/rds.txt b/Documentation/networking/rds.txt
index f2a0147c933d..eec61694e894 100644
--- a/Documentation/networking/rds.txt
+++ b/Documentation/networking/rds.txt
@@ -159,7 +159,7 @@ Socket Interface
set SO_RDS_TRANSPORT on a socket for which the transport has
been previously attached explicitly (by SO_RDS_TRANSPORT) or
implicitly (via bind(2)) will return an error of EOPNOTSUPP.
- An attempt to set SO_RDS_TRANSPPORT to RDS_TRANS_NONE will
+ An attempt to set SO_RDS_TRANSPORT to RDS_TRANS_NONE will
always return EINVAL.
RDMA for RDS
diff --git a/Documentation/networking/sfp-phylink.rst b/Documentation/networking/sfp-phylink.rst
index 8d7af28cd835..5aec7c8857d0 100644
--- a/Documentation/networking/sfp-phylink.rst
+++ b/Documentation/networking/sfp-phylink.rst
@@ -138,27 +138,27 @@ this documentation.
.. code-block:: c
- static int foo_ethtool_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
- {
- struct foo_priv *priv = netdev_priv(dev);
-
- return phylink_ethtool_ksettings_set(priv->phylink, cmd);
- }
-
- static int foo_ethtool_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
- {
- struct foo_priv *priv = netdev_priv(dev);
+ static int foo_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+ {
+ struct foo_priv *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+ }
- return phylink_ethtool_ksettings_get(priv->phylink, cmd);
- }
+ static int foo_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+ {
+ struct foo_priv *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+ }
-7. Replace the call to:
+7. Replace the call to::
phy_dev = of_phy_connect(dev, node, link_func, flags, phy_interface);
- and associated code with a call to:
+ and associated code with a call to::
err = phylink_of_phy_connect(priv->phylink, node, flags);
diff --git a/Documentation/power/index.rst b/Documentation/power/index.rst
index 002e42745263..ced8a8007434 100644
--- a/Documentation/power/index.rst
+++ b/Documentation/power/index.rst
@@ -13,7 +13,6 @@ Power Management
drivers-testing
energy-model
freezing-of-tasks
- interface
opp
pci
pm_qos_interface
diff --git a/MAINTAINERS b/MAINTAINERS
index 8f27f40d22bb..439151d589a4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -693,7 +693,7 @@ ALLWINNER CPUFREQ DRIVER
M: Yangtao Li <tiny.windzz@gmail.com>
L: linux-pm@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
+F: Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
F: drivers/cpufreq/sun50i-cpufreq-nvmem.c
ALLWINNER CRYPTO DRIVERS
@@ -4017,7 +4017,7 @@ M: Cheng-Yi Chiang <cychiang@chromium.org>
S: Maintained
R: Enric Balletbo i Serra <enric.balletbo@collabora.com>
R: Guenter Roeck <groeck@chromium.org>
-F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
+F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
F: sound/soc/codecs/cros_ec_codec.*
CIRRUS LOGIC AUDIO CODEC DRIVERS
@@ -4073,7 +4073,6 @@ F: drivers/scsi/snic/
CISCO VIC ETHERNET NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
M: Govindarajulu Varadarajan <_govind@gmx.com>
-M: Parvi Kaustubhi <pkaustub@cisco.com>
S: Supported
F: drivers/net/ethernet/cisco/enic/
@@ -4475,7 +4474,7 @@ L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/platform/sunxi/sun6i-csi/
-F: Documentation/devicetree/bindings/media/sun6i-csi.txt
+F: Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
CW1200 WLAN driver
M: Solomon Peachy <pizza@shaftnet.org>
@@ -4572,7 +4571,7 @@ F: drivers/infiniband/hw/cxgb4/
F: include/uapi/rdma/cxgb4-abi.h
CXGB4VF ETHERNET DRIVER (CXGB4VF)
-M: Casey Leedom <leedom@chelsio.com>
+M: Vishal Kulkarni <vishal@gmail.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -5668,7 +5667,7 @@ L: dri-devel@lists.freedesktop.org
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/stm
-F: Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
+F: Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
DRM DRIVERS FOR TI LCDC
M: Jyri Sarha <jsarha@ti.com>
@@ -6198,7 +6197,6 @@ S: Supported
F: drivers/scsi/be2iscsi/
Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
-M: Sathya Perla <sathya.perla@broadcom.com>
M: Ajit Khaparde <ajit.khaparde@broadcom.com>
M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
M: Somnath Kotur <somnath.kotur@broadcom.com>
@@ -7738,7 +7736,7 @@ Hyper-V CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
M: Stephen Hemminger <sthemmin@microsoft.com>
-M: Sasha Levin <sashal@kernel.org>
+M: Wei Liu <wei.liu@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
L: linux-hyperv@vger.kernel.org
S: Supported
@@ -10164,7 +10162,7 @@ MAXBOTIX ULTRASONIC RANGER IIO DRIVER
M: Andreas Klinger <ak@it-klinger.de>
L: linux-iio@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
+F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
F: drivers/iio/proximity/mb1232.c
MAXIM MAX77650 PMIC MFD DRIVER
@@ -10467,7 +10465,7 @@ M: Hugues Fruchet <hugues.fruchet@st.com>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Supported
-F: Documentation/devicetree/bindings/media/st,stm32-dcmi.txt
+F: Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
F: drivers/media/platform/stm32/stm32-dcmi.c
MEDIA DRIVERS FOR NVIDIA TEGRA - VDE
@@ -11119,7 +11117,7 @@ M: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
L: linux-mips@vger.kernel.org
W: http://www.linux-mips.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
-Q: http://patchwork.linux-mips.org/project/linux-mips/list/
+Q: https://patchwork.kernel.org/project/linux-mips/list/
S: Maintained
F: Documentation/devicetree/bindings/mips/
F: Documentation/mips/
@@ -12739,7 +12737,7 @@ M: Tom Joseph <tjoseph@cadence.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/cdns,*.txt
-F: drivers/pci/controller/pcie-cadence*
+F: drivers/pci/controller/cadence/
PCI DRIVER FOR FREESCALE LAYERSCAPE
M: Minghuan Lian <minghuan.Lian@nxp.com>
@@ -12952,7 +12950,6 @@ M: Robert Richter <rrichter@marvell.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
-F: Documentation/devicetree/bindings/pci/pci-thunder-*
F: drivers/pci/controller/pci-thunder-*
PCIE DRIVER FOR HISILICON
@@ -13662,6 +13659,12 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/qcom/
+QCOM IPA DRIVER
+M: Alex Elder <elder@kernel.org>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ipa/
+
QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
M: Gabriel Somlo <somlo@cmu.edu>
M: "Michael S. Tsirkin" <mst@redhat.com>
@@ -14227,7 +14230,7 @@ F: include/dt-bindings/reset/
F: include/linux/reset.h
F: include/linux/reset/
F: include/linux/reset-controller.h
-K: \b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b
+K: \b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b
RESTARTABLE SEQUENCES SUPPORT
M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
@@ -15922,7 +15925,7 @@ F: drivers/*/stm32-*timer*
F: drivers/pwm/pwm-stm32*
F: include/linux/*/stm32-*tim*
F: Documentation/ABI/testing/*timer-stm32
-F: Documentation/devicetree/bindings/*/stm32-*timer*
+F: Documentation/devicetree/bindings/*/*stm32-*timer*
F: Documentation/devicetree/bindings/pwm/pwm-stm32*
STMMAC ETHERNET DRIVER
@@ -16081,6 +16084,8 @@ SYNOPSYS DESIGNWARE 8250 UART DRIVER
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
S: Maintained
F: drivers/tty/serial/8250/8250_dw.c
+F: drivers/tty/serial/8250/8250_dwlib.*
+F: drivers/tty/serial/8250/8250_lpss.c
SYNOPSYS DESIGNWARE APB GPIO DRIVER
M: Hoan Tran <hoan@os.amperecomputing.com>
@@ -16111,6 +16116,13 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/synopsys/
+SYNOPSYS DESIGNWARE ETHERNET XPCS DRIVER
+M: Jose Abreu <Jose.Abreu@synopsys.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/phy/mdio-xpcs.c
+F: include/linux/mdio-xpcs.h
+
SYNOPSYS DESIGNWARE I2C DRIVER
M: Jarkko Nikula <jarkko.nikula@linux.intel.com>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -17852,6 +17864,13 @@ S: Supported
F: arch/x86/kernel/cpu/vmware.c
F: arch/x86/include/asm/vmware.h
+VMWARE VIRTUAL PTP CLOCK DRIVER
+M: Vivek Thampi <vithampi@vmware.com>
+M: "VMware, Inc." <pv-drivers@vmware.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/ptp/ptp_vmw.c
+
VMWARE PVRDMA DRIVER
M: Adit Ranadive <aditr@vmware.com>
M: VMware PV-Drivers <pv-drivers@vmware.com>
diff --git a/Makefile b/Makefile
index 1a1a0d271697..e25db579ce74 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 98de654b79b3..17fe351cdde0 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -738,8 +738,9 @@ config HAVE_STACK_VALIDATION
config HAVE_RELIABLE_STACKTRACE
bool
help
- Architecture has a save_stack_trace_tsk_reliable() function which
- only returns a stack trace if it can guarantee the trace is reliable.
+ Architecture has either save_stack_trace_tsk_reliable() or
+ arch_stack_walk_reliable() function which only returns a stack trace
+ if it can guarantee the trace is reliable.
config HAVE_ARCH_HASH
bool
diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
index f3ced6df0c9b..9f66f96d09c9 100644
--- a/arch/arm/boot/dts/am437x-idk-evm.dts
+++ b/arch/arm/boot/dts/am437x-idk-evm.dts
@@ -526,11 +526,11 @@
* Supply voltage supervisor on board will not allow opp50 so
* disable it and set opp100 as suspend OPP.
*/
- opp50@300000000 {
+ opp50-300000000 {
status = "disabled";
};
- opp100@600000000 {
+ opp100-600000000 {
opp-suspend;
};
};
diff --git a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
index 1b5a835f66bd..efea891b1a76 100644
--- a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
+++ b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
@@ -21,6 +21,7 @@
aliases {
ethernet0 = &genet;
+ pcie0 = &pcie0;
};
leds {
@@ -31,6 +32,8 @@
pwr {
label = "PWR";
gpios = <&expgpio 2 GPIO_ACTIVE_LOW>;
+ default-state = "keep";
+ linux,default-trigger = "default-on";
};
};
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts
index 66ab35eccba7..28be0332c1c8 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts
@@ -26,6 +26,8 @@
pwr {
label = "PWR";
gpios = <&expgpio 2 GPIO_ACTIVE_LOW>;
+ default-state = "keep";
+ linux,default-trigger = "default-on";
};
};
};
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
index 74ed6d047807..37343148643d 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
@@ -27,6 +27,8 @@
pwr {
label = "PWR";
gpios = <&expgpio 2 GPIO_ACTIVE_LOW>;
+ default-state = "keep";
+ linux,default-trigger = "default-on";
};
};
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index de7f85efaa51..af06a55d1c5c 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -61,10 +61,10 @@
regulator-max-microvolt = <1800000>;
};
- evm_3v3: fixedregulator-evm3v3 {
+ vsys_3v3: fixedregulator-vsys3v3 {
/* Output of Cntlr A of TPS43351-Q1 on dra7-evm */
compatible = "regulator-fixed";
- regulator-name = "evm_3v3";
+ regulator-name = "vsys_3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&evm_12v0>;
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index fc418834890d..2119a78e9c15 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -3474,6 +3474,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER13_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
};
};
@@ -3501,6 +3502,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
};
};
@@ -3528,6 +3530,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
};
};
@@ -3555,6 +3558,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
};
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index d78b684e7fca..4305051bb769 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -184,6 +184,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+ dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
@@ -238,6 +239,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+ dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
index 2f7539afef2b..42b8a205b64f 100644
--- a/arch/arm/boot/dts/dra76x.dtsi
+++ b/arch/arm/boot/dts/dra76x.dtsi
@@ -128,3 +128,8 @@
&usb4_tm {
status = "disabled";
};
+
+&mmc3 {
+ /* dra76x is not affected by i887 */
+ max-frequency = <96000000>;
+};
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 55cef4cac5f1..dc0a93bccbf1 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -796,16 +796,6 @@
clock-div = <1>;
};
- ipu1_gfclk_mux: ipu1_gfclk_mux@520 {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clocks = <&dpll_abe_m2x2_ck>, <&dpll_core_h22x2_ck>;
- ti,bit-shift = <24>;
- reg = <0x0520>;
- assigned-clocks = <&ipu1_gfclk_mux>;
- assigned-clock-parents = <&dpll_core_h22x2_ck>;
- };
-
dummy_ck: dummy_ck {
#clock-cells = <0>;
compatible = "fixed-clock";
@@ -1564,6 +1554,8 @@
compatible = "ti,clkctrl";
reg = <0x20 0x4>;
#clock-cells = <2>;
+ assigned-clocks = <&ipu1_clkctrl DRA7_IPU1_MMU_IPU1_CLKCTRL 24>;
+ assigned-clock-parents = <&dpll_core_h22x2_ck>;
};
ipu_clkctrl: ipu-clkctrl@50 {
diff --git a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
index cd075621de52..84fcc203a2e4 100644
--- a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
@@ -275,7 +275,7 @@
/* SRAM on Colibri nEXT_CS0 */
sram@0,0 {
- compatible = "cypress,cy7c1019dv33-10zsxi, mtd-ram";
+ compatible = "cypress,cy7c1019dv33-10zsxi", "mtd-ram";
reg = <0 0 0x00010000>;
#address-cells = <1>;
#size-cells = <1>;
@@ -286,7 +286,7 @@
/* SRAM on Colibri nEXT_CS1 */
sram@1,0 {
- compatible = "cypress,cy7c1019dv33-10zsxi, mtd-ram";
+ compatible = "cypress,cy7c1019dv33-10zsxi", "mtd-ram";
reg = <1 0 0x00010000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
index 978dc1c2ff1b..4d18952658f8 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
@@ -192,7 +192,6 @@
pinctrl-0 = <&pinctrl_usdhc4>;
bus-width = <8>;
non-removable;
- vmmc-supply = <&vdd_emmc_1p8>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
index d05be3f0e2a7..04717cf69db0 100644
--- a/arch/arm/boot/dts/imx7-colibri.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri.dtsi
@@ -336,7 +336,6 @@
assigned-clock-rates = <400000000>;
bus-width = <8>;
fsl,tuning-step = <2>;
- max-frequency = <100000000>;
vmmc-supply = <&reg_module_3v3>;
vqmmc-supply = <&reg_DCDC3>;
non-removable;
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 92f6d0c2a74f..4c22828df55f 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -44,7 +44,7 @@
opp-hz = /bits/ 64 <792000000>;
opp-microvolt = <1000000>;
clock-latency-ns = <150000>;
- opp-supported-hw = <0xd>, <0xf>;
+ opp-supported-hw = <0xd>, <0x7>;
opp-suspend;
};
@@ -52,7 +52,7 @@
opp-hz = /bits/ 64 <996000000>;
opp-microvolt = <1100000>;
clock-latency-ns = <150000>;
- opp-supported-hw = <0xc>, <0xf>;
+ opp-supported-hw = <0xc>, <0x7>;
opp-suspend;
};
@@ -60,7 +60,7 @@
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1225000>;
clock-latency-ns = <150000>;
- opp-supported-hw = <0x8>, <0xf>;
+ opp-supported-hw = <0x8>, <0x3>;
opp-suspend;
};
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 0855b1fe98e0..760a68c163c8 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -747,7 +747,7 @@
};
mdio0: mdio@2d24000 {
- compatible = "fsl,etsec2-mdio";
+ compatible = "gianfar";
device_type = "mdio";
#address-cells = <1>;
#size-cells = <0>;
@@ -756,7 +756,7 @@
};
mdio1: mdio@2d64000 {
- compatible = "fsl,etsec2-mdio";
+ compatible = "gianfar";
device_type = "mdio";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
index 85665506f4f8..b6e82b165f5c 100644
--- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
@@ -182,6 +182,14 @@
pwm-names = "enable", "direction";
direction-duty-cycle-ns = <10000000>;
};
+
+ backlight: backlight {
+ compatible = "led-backlight";
+
+ leds = <&backlight_led>;
+ brightness-levels = <31 63 95 127 159 191 223 255>;
+ default-brightness-level = <6>;
+ };
};
&dss {
@@ -205,6 +213,8 @@
vddi-supply = <&lcd_regulator>;
reset-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; /* gpio101 */
+ backlight = <&backlight>;
+
width-mm = <50>;
height-mm = <89>;
@@ -393,12 +403,11 @@
ramp-up-us = <1024>;
ramp-down-us = <8193>;
- led@0 {
+ backlight_led: led@0 {
reg = <0>;
led-sources = <2>;
ti,led-mode = <0>;
label = ":backlight";
- linux,default-trigger = "backlight";
};
led@1 {
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index beb9885e6ffc..c0999e27e9b1 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -377,7 +377,7 @@
};
sata: sata@fc600000 {
- compatible = "renesas,sata-r8a7779", "renesas,rcar-sata";
+ compatible = "renesas,sata-r8a7779";
reg = <0xfc600000 0x200000>;
interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks R8A7779_CLK_SATA>;
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 519ff58e67b3..0afcae9f7cf8 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -178,6 +178,7 @@ CONFIG_SCHED_TRACER=y
CONFIG_STACK_TRACER=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_TEST_KSTRTOX=y
+CONFIG_DEBUG_FS=y
CONFIG_KGDB=y
CONFIG_KGDB_KDB=y
CONFIG_STRICT_DEVMEM=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index c32c338f7704..847f9874ccc4 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -375,6 +375,7 @@ CONFIG_BACKLIGHT_GENERIC=m
CONFIG_BACKLIGHT_PWM=m
CONFIG_BACKLIGHT_PANDORA=m
CONFIG_BACKLIGHT_GPIO=m
+CONFIG_BACKLIGHT_LED=m
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index fe2e1e82e233..e73c97b0f5b0 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -157,6 +157,7 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_FUNCTION_TRACER=y
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c3314b286a61..a827b4d60d38 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -392,9 +392,6 @@ static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_vhe_guest_enter(void) {}
-static inline void kvm_arm_vhe_guest_exit(void) {}
-
#define KVM_BP_HARDEN_UNKNOWN -1
#define KVM_BP_HARDEN_WA_NEEDED 0
#define KVM_BP_HARDEN_NOT_REQUIRED 1
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 35ff620537e6..03506ce46149 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -91,6 +91,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
endif
+AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
+obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 912aeceb4ff8..5aa5796cff0e 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -109,17 +109,17 @@ void imx_cpu_die(unsigned int cpu);
int imx_cpu_kill(unsigned int cpu);
#ifdef CONFIG_SUSPEND
-void v7_cpu_resume(void);
void imx53_suspend(void __iomem *ocram_vbase);
extern const u32 imx53_suspend_sz;
void imx6_suspend(void __iomem *ocram_vbase);
#else
-static inline void v7_cpu_resume(void) {}
static inline void imx53_suspend(void __iomem *ocram_vbase) {}
static const u32 imx53_suspend_sz;
static inline void imx6_suspend(void __iomem *ocram_vbase) {}
#endif
+void v7_cpu_resume(void);
+
void imx6_pm_ccm_init(const char *ccm_compat);
void imx6q_pm_init(void);
void imx6dl_pm_init(void);
diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S
new file mode 100644
index 000000000000..5bd1ba7ef15b
--- /dev/null
+++ b/arch/arm/mach-imx/resume-imx6.S
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/hardware/cache-l2x0.h>
+#include "hardware.h"
+
+/*
+ * The following code must assume it is running from physical address
+ * where absolute virtual addresses to the data section have to be
+ * turned into relative ones.
+ */
+
+ENTRY(v7_cpu_resume)
+ bl v7_invalidate_l1
+#ifdef CONFIG_CACHE_L2X0
+ bl l2c310_early_resume
+#endif
+ b cpu_resume
+ENDPROC(v7_cpu_resume)
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
index 062391ff13da..1eabf2d2834b 100644
--- a/arch/arm/mach-imx/suspend-imx6.S
+++ b/arch/arm/mach-imx/suspend-imx6.S
@@ -327,17 +327,3 @@ resume:
ret lr
ENDPROC(imx6_suspend)
-
-/*
- * The following code must assume it is running from physical address
- * where absolute virtual addresses to the data section have to be
- * turned into relative ones.
- */
-
-ENTRY(v7_cpu_resume)
- bl v7_invalidate_l1
-#ifdef CONFIG_CACHE_L2X0
- bl l2c310_early_resume
-#endif
- b cpu_resume
-ENDPROC(v7_cpu_resume)
diff --git a/arch/arm/mach-meson/Kconfig b/arch/arm/mach-meson/Kconfig
index 01f0f4b765e0..75034fe197e3 100644
--- a/arch/arm/mach-meson/Kconfig
+++ b/arch/arm/mach-meson/Kconfig
@@ -9,7 +9,6 @@ menuconfig ARCH_MESON
select CACHE_L2X0
select PINCTRL
select PINCTRL_MESON
- select COMMON_CLK
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index e1135b9d67c6..5017a3be0ff0 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -16,7 +16,7 @@ hwmod-common = omap_hwmod.o omap_hwmod_reset.o \
clock-common = clock.o
secure-common = omap-smc.o omap-secure.o
-obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
+obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
obj-$(CONFIG_ARCH_OMAP4) += $(hwmod-common) $(secure-common)
obj-$(CONFIG_SOC_AM33XX) += $(hwmod-common) $(secure-common)
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index f28047233665..27608d1026cb 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -431,7 +431,6 @@ void __init omap2420_init_early(void)
omap_hwmod_init_postsetup();
omap_clk_soc_init = omap2420_dt_clk_init;
rate_table = omap2420_rate_table;
- omap_secure_init();
}
void __init omap2420_init_late(void)
@@ -456,7 +455,6 @@ void __init omap2430_init_early(void)
omap_hwmod_init_postsetup();
omap_clk_soc_init = omap2430_dt_clk_init;
rate_table = omap2430_rate_table;
- omap_secure_init();
}
void __init omap2430_init_late(void)
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index f82f25c1a5f9..d5dc12878dfe 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -327,7 +327,7 @@
#size-cells = <0>;
bus-width = <4>;
- max-frequency = <50000000>;
+ max-frequency = <60000000>;
non-removable;
disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
index a8bb3fa9fec9..cb1b48f5b8b1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
@@ -593,6 +593,7 @@
compatible = "brcm,bcm43438-bt";
interrupt-parent = <&gpio_intc>;
interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wakeup";
shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
max-speed = <2000000>;
clocks = <&wifi32k>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
index 6082ae022136..d237162a8744 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
@@ -20,6 +20,8 @@
};
&fman0 {
+ fsl,erratum-a050385;
+
/* these aliases provide the FMan ports mapping */
enet0: ethernet@e0000 {
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
index d3d26cca7d52..13460a360c6a 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
@@ -52,11 +52,6 @@
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0>;
};
-
- ethphy1: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <1>;
- };
};
};
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
index e1d357eaad7c..d8c44d3ca15a 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -102,7 +102,7 @@
};
gmac0: ethernet@ff800000 {
- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
+ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
reg = <0xff800000 0x2000>;
interrupts = <0 90 4>;
interrupt-names = "macirq";
@@ -118,7 +118,7 @@
};
gmac1: ethernet@ff802000 {
- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
+ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
reg = <0xff802000 0x2000>;
interrupts = <0 91 4>;
interrupt-names = "macirq";
@@ -134,7 +134,7 @@
};
gmac2: ethernet@ff804000 {
- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
+ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
reg = <0xff804000 0x2000>;
interrupts = <0 92 4>;
interrupt-names = "macirq";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 905109f6814f..4db223dbc549 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -773,7 +773,7 @@ CONFIG_ARCH_R8A774A1=y
CONFIG_ARCH_R8A774B1=y
CONFIG_ARCH_R8A774C0=y
CONFIG_ARCH_R8A7795=y
-CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77960=y
CONFIG_ARCH_R8A77961=y
CONFIG_ARCH_R8A77965=y
CONFIG_ARCH_R8A77970=y
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 25fec4bde43a..a358e97572c1 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -32,7 +32,7 @@ static inline void gic_write_eoir(u32 irq)
isb();
}
-static inline void gic_write_dir(u32 irq)
+static __always_inline void gic_write_dir(u32 irq)
{
write_sysreg_s(irq, SYS_ICC_DIR_EL1);
isb();
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 806e9dc2a852..a4d1b5f771f6 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void)
return test_bit(ICACHEF_ALIASING, &__icache_flags);
}
-static inline int icache_is_vpipt(void)
+static __always_inline int icache_is_vpipt(void)
{
return test_bit(ICACHEF_VPIPT, &__icache_flags);
}
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 665c78e0665a..e6cca3d4acf7 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
-static inline void __flush_icache_all(void)
+static __always_inline void __flush_icache_all(void)
{
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
return;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 92ef9539874a..2a746b99e937 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field)
return cpuid_feature_extract_signed_field_width(features, field, 4);
}
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
return (u64)(features << (64 - width - field)) >> (64 - width);
}
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
return cpuid_feature_extract_unsigned_field_width(features, field, 4);
@@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void)
return val == 0x1;
}
-static inline bool system_supports_fpsimd(void)
+static __always_inline bool system_supports_fpsimd(void)
{
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}
@@ -575,13 +575,13 @@ static inline bool system_uses_ttbr0_pan(void)
!cpus_have_const_cap(ARM64_HAS_PAN);
}
-static inline bool system_supports_sve(void)
+static __always_inline bool system_supports_sve(void)
{
return IS_ENABLED(CONFIG_ARM64_SVE) &&
cpus_have_const_cap(ARM64_SVE);
}
-static inline bool system_supports_cnp(void)
+static __always_inline bool system_supports_cnp(void)
{
return IS_ENABLED(CONFIG_ARM64_CNP) &&
cpus_have_const_cap(ARM64_HAS_CNP);
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4e531f57147d..6facd1308e7c 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
}
#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
}
@@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
}
#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
asm volatile(ALTERNATIVE("ldr %w0, [%1]",
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 688c63412cc2..f658dda12364 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -36,7 +36,7 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.hcr_el2 & HCR_RW);
}
@@ -127,7 +127,7 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
vcpu->arch.vsesr_el2 = vsesr;
}
-static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}
@@ -153,17 +153,17 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long
*__vcpu_elr_el1(vcpu) = v;
}
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
return kvm_condition_valid32(vcpu);
@@ -181,13 +181,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
* coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
* AArch32 with banked registers.
*/
-static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
u8 reg_num)
{
return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
unsigned long val)
{
if (reg_num != 31)
@@ -264,12 +264,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
return mode != PSR_MODE_EL0t;
}
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.esr_el2;
}
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
@@ -279,12 +279,12 @@ static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
return -1;
}
-static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.far_el2;
}
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
@@ -299,7 +299,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}
-static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}
@@ -319,17 +319,17 @@ static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
}
-static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}
-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
@@ -340,18 +340,18 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}
-static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
/* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}
-static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}
@@ -361,17 +361,17 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}
-static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}
-static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
switch (kvm_vcpu_trap_get_fault(vcpu)) {
case FSC_SEA:
@@ -390,7 +390,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
}
}
-static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
return ESR_ELx_SYS64_ISS_RT(esr);
@@ -504,7 +504,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
return data; /* Leave LE untouched */
}
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
if (vcpu_mode_is_32bit(vcpu))
kvm_skip_instr32(vcpu, is_wide_instr);
@@ -519,7 +519,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
* Skip an instruction which has been emulated at hyp while most guest sysregs
* are live.
*/
-static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d87aa609d2b6..57fd46acd058 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -626,38 +626,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif
-static inline void kvm_arm_vhe_guest_enter(void)
-{
- local_daif_mask();
-
- /*
- * Having IRQs masked via PMR when entering the guest means the GIC
- * will not signal the CPU of interrupts of lower priority, and the
- * only way to get out will be via guest exceptions.
- * Naturally, we want to avoid this.
- *
- * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
- * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
- */
- pmr_sync();
-}
-
-static inline void kvm_arm_vhe_guest_exit(void)
-{
- /*
- * local_daif_restore() takes care to properly restore PSTATE.DAIF
- * and the GIC PMR if the host is using IRQ priorities.
- */
- local_daif_restore(DAIF_PROCCTX_NOIRQ);
-
- /*
- * When we exit from the guest we change a number of CPU configuration
- * parameters, such as traps. Make sure these changes take effect
- * before running the host or additional guests.
- */
- isb();
-}
-
#define KVM_BP_HARDEN_UNKNOWN -1
#define KVM_BP_HARDEN_WA_NEEDED 0
#define KVM_BP_HARDEN_NOT_REQUIRED 1
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index a3a6a2ba9a63..fe57f60f06a8 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -47,6 +47,13 @@
#define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1)
+/*
+ * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
+ * static inline can allow the compiler to out-of-line this. KVM always wants
+ * the macro version, as it is always inlined.
+ */
+#define __kvm_swab32(x) ___constant_swab32(x)
+
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
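A minimal sketch of the constant-folding swab the comment above relies on (illustrative only; the kernel's real definition is ___constant_swab32() in include/uapi/linux/swab.h):

#include <stdint.h>

/*
 * A macro expands at every call site, so unlike a static inline it can
 * never be emitted as an out-of-line function symbol that hyp text
 * would then have to call.
 */
#define example_swab32(x) ((uint32_t)(			\
	(((uint32_t)(x) & 0x000000ffUL) << 24) |	\
	(((uint32_t)(x) & 0x0000ff00UL) <<  8) |	\
	(((uint32_t)(x) & 0x00ff0000UL) >>  8) |	\
	(((uint32_t)(x) & 0xff000000UL) >> 24)))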
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 53d846f1bfe7..785762860c63 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -93,7 +93,7 @@ void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
-static inline unsigned long __kern_hyp_va(unsigned long v)
+static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
"ror %0, %0, #1\n"
@@ -473,6 +473,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;
+/* This is only called on a VHE system */
static inline void *kvm_get_hyp_vector(void)
{
struct bp_hardening_data *data = arm64_get_bp_hardening_data();
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 0958ed6191aa..61fd26752adc 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -83,7 +83,7 @@ static inline bool is_kernel_in_hyp_mode(void)
return read_sysreg(CurrentEL) == CurrentEL_EL2;
}
-static inline bool has_vhe(void)
+static __always_inline bool has_vhe(void)
{
if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
return true;
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index dfe8dd172512..925086b46136 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -625,7 +625,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
}
/* Switch to the guest for VHE systems running in EL2 */
-int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
@@ -678,7 +678,42 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
return exit_code;
}
-NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
+NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
+
+int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ local_daif_mask();
+
+ /*
+ * Having IRQs masked via PMR when entering the guest means the GIC
+ * will not signal the CPU of interrupts of lower priority, and the
+ * only way to get out will be via guest exceptions.
+ * Naturally, we want to avoid this.
+ *
+ * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
+ * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
+ */
+ pmr_sync();
+
+ ret = __kvm_vcpu_run_vhe(vcpu);
+
+ /*
+ * local_daif_restore() takes care to properly restore PSTATE.DAIF
+ * and the GIC PMR if the host is using IRQ priorities.
+ */
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+ /*
+ * When we exit from the guest we change a number of CPU configuration
+ * parameters, such as traps. Make sure these changes take effect
+ * before running the host or additional guests.
+ */
+ isb();
+
+ return ret;
+}
/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
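The wrapper added above folds the old enter/exit helpers around the run loop in one non-inlined function, so the mask/run/restore bracketing can no longer be split across inlining boundaries. A minimal sketch of that shape, with stand-in names for the DAIF/PMR helpers:

/*
 * Stand-ins only; the real bracketing is local_daif_mask()/pmr_sync()
 * and local_daif_restore()/isb() as in the hunk above.
 */
static void mask_interrupts(void) { }
static void restore_interrupts(void) { }

static int run_guarded(int (*body)(void *arg), void *arg)
{
	int ret;

	mask_interrupts();	/* nothing may run between here and entry */
	ret = body(arg);
	restore_interrupts();	/* effects visible before host code runs */
	return ret;
}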
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 29ee1feba4eb..4f3a087e36d5 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -69,14 +69,14 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
u32 data = vcpu_get_reg(vcpu, rd);
if (__is_be(vcpu)) {
/* guest pre-swabbed data, undo this for writel() */
- data = swab32(data);
+ data = __kvm_swab32(data);
}
writel_relaxed(data, addr);
} else {
u32 data = readl_relaxed(addr);
if (__is_be(vcpu)) {
/* guest expects swabbed data */
- data = swab32(data);
+ data = __kvm_swab32(data);
}
vcpu_set_reg(vcpu, rd, data);
}
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 8ef73e89d514..d89bb22589f6 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -260,14 +260,26 @@ asmlinkage void post_ttbr_update_workaround(void)
CONFIG_CAVIUM_ERRATUM_27456));
}
-static int asids_init(void)
+static int asids_update_limit(void)
{
- asid_bits = get_cpu_asid_bits();
+ unsigned long num_available_asids = NUM_USER_ASIDS;
+
+ if (arm64_kernel_unmapped_at_el0())
+ num_available_asids /= 2;
/*
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is reserved for init_mm.
*/
- WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
+ WARN_ON(num_available_asids - 1 <= num_possible_cpus());
+ pr_info("ASID allocator initialised with %lu entries\n",
+ num_available_asids);
+ return 0;
+}
+arch_initcall(asids_update_limit);
+
+static int asids_init(void)
+{
+ asid_bits = get_cpu_asid_bits();
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
GFP_KERNEL);
@@ -282,8 +294,6 @@ static int asids_init(void)
*/
if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
set_kpti_asid_bits();
-
- pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
return 0;
}
early_initcall(asids_init);
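Splitting asids_update_limit() out of asids_init() lets the usable-ASID count be computed once all CPUs, and the KPTI decision, are known: with the kernel unmapped at EL0 each context consumes an even/odd ASID pair. A small sketch of the arithmetic, with illustrative values:

unsigned long usable_asids(unsigned int asid_bits, int kpti)
{
	unsigned long n = 1UL << asid_bits;	/* 256 or 65536 on arm64 */

	/* KPTI reserves the odd ASID of every even/odd pair */
	return kpti ? n / 2 : n;
}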
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index 37b93166bf22..c340f947baa0 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -4,6 +4,8 @@
#include "jz4780.dtsi"
#include <dt-bindings/clock/ingenic,tcu.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/regulator/active-semi,8865-regulator.h>
/ {
compatible = "img,ci20", "ingenic,jz4780";
@@ -163,63 +165,71 @@
regulators {
vddcore: SUDCDC1 {
- regulator-name = "VDDCORE";
+ regulator-name = "DCDC_REG1";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1100000>;
regulator-always-on;
};
vddmem: SUDCDC2 {
- regulator-name = "VDDMEM";
+ regulator-name = "DCDC_REG2";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
};
vcc_33: SUDCDC3 {
- regulator-name = "VCC33";
+ regulator-name = "DCDC_REG3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vcc_50: SUDCDC4 {
- regulator-name = "VCC50";
+ regulator-name = "SUDCDC_REG4";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
regulator-always-on;
};
vcc_25: LDO_REG5 {
- regulator-name = "VCC25";
+ regulator-name = "LDO_REG5";
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
regulator-always-on;
};
wifi_io: LDO_REG6 {
- regulator-name = "WIFIIO";
+ regulator-name = "LDO_REG6";
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
regulator-always-on;
};
vcc_28: LDO_REG7 {
- regulator-name = "VCC28";
+ regulator-name = "LDO_REG7";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
regulator-always-on;
};
vcc_15: LDO_REG8 {
- regulator-name = "VCC15";
+ regulator-name = "LDO_REG8";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
};
- vcc_18: LDO_REG9 {
- regulator-name = "VCC18";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ vrtc_18: LDO_REG9 {
+ regulator-name = "LDO_REG9";
+ /* Despite the datasheet stating 3.3V
+ * for REG9, and the driver expecting
+ * that, REG9 outputs 1.8V. The CI20
+ * likely uses a proprietary,
+ * factory-programmed chip variant.
+ * Since this is a simple on/off LDO,
+ * the exact values do not matter.
+ */
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vcc_11: LDO_REG10 {
- regulator-name = "VCC11";
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1100000>;
+ regulator-name = "LDO_REG10";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
regulator-always-on;
};
};
@@ -261,7 +271,9 @@
rtc@51 {
compatible = "nxp,pcf8563";
reg = <0x51>;
- interrupts = <110>;
+
+ interrupt-parent = <&gpf>;
+ interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 1ac2752fb791..a7b469d89e2c 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -605,7 +605,8 @@ static void __init bootcmdline_init(char **cmdline_p)
* If we're configured to take boot arguments from DT, look for those
* now.
*/
- if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB))
+ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
+ IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
#endif
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e745abc5457a..245be4fafe13 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2193,11 +2193,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
* oprofile_cpu_type already has a value, then we are
* possibly overriding a real PVR with a logical one,
* and, in that case, keep the current value for
- * oprofile_cpu_type.
+ * oprofile_cpu_type. Furthermore, let's ensure that the
+ * fix for the PMAO bug is enabled in compatibility mode.
*/
if (old.oprofile_cpu_type != NULL) {
t->oprofile_cpu_type = old.oprofile_cpu_type;
t->oprofile_type = old.oprofile_type;
+ t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
}
}
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 2462cd7c565c..d0854320bb50 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -331,11 +331,13 @@ int hw_breakpoint_handler(struct die_args *args)
}
info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
- if (!dar_within_range(regs->dar, info))
- info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
-
- if (!IS_ENABLED(CONFIG_PPC_8xx) && !stepping_handler(regs, bp, info))
- goto out;
+ if (IS_ENABLED(CONFIG_PPC_8xx)) {
+ if (!dar_within_range(regs->dar, info))
+ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+ } else {
+ if (!stepping_handler(regs, bp, info))
+ goto out;
+ }
/*
* As a policy, the callback is invoked in a 'trigger-after-execute'
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index b4c89a1acebb..a32d478a7f41 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -303,6 +303,12 @@ SECTIONS
*(.branch_lt)
}
+#ifdef CONFIG_DEBUG_INFO_BTF
+ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {
+ *(.BTF)
+ }
+#endif
+
.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
__start_opd = .;
KEEP(*(.opd))
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index ef7b1119b2e2..1c07d5a3f543 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -373,7 +373,9 @@ static inline bool flush_coherent_icache(unsigned long addr)
*/
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
mb(); /* sync */
+ allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
icbi((void *)addr);
+ prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
mb(); /* sync */
isync();
return true;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 73f029eae0cc..1a3b5a5276be 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -121,6 +121,7 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
def_bool y
+ depends on MMU
select SPARSEMEM_VMEMMAP_ENABLE
config ARCH_SELECT_MEMORY_MODEL
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index d325b67d00df..3078b2de0b2d 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -10,4 +10,28 @@ config SOC_SIFIVE
help
This enables support for SiFive SoC platform hardware.
+config SOC_VIRT
+ bool "QEMU Virt Machine"
+ select VIRTIO_PCI
+ select VIRTIO_BALLOON
+ select VIRTIO_MMIO
+ select VIRTIO_CONSOLE
+ select VIRTIO_NET
+ select NET_9P_VIRTIO
+ select VIRTIO_BLK
+ select SCSI_VIRTIO
+ select DRM_VIRTIO_GPU
+ select HW_RANDOM_VIRTIO
+ select RPMSG_CHAR
+ select RPMSG_VIRTIO
+ select CRYPTO_DEV_VIRTIO
+ select VIRTIO_INPUT
+ select POWER_RESET_SYSCON
+ select POWER_RESET_SYSCON_POWEROFF
+ select GOLDFISH
+ select RTC_DRV_GOLDFISH
+ select SIFIVE_PLIC
+ help
+ This enables support for the QEMU Virt Machine.
+
endmenu
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index b9009a2fbaf5..259cb53d7f20 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -13,8 +13,10 @@ LDFLAGS_vmlinux :=
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
LDFLAGS_vmlinux := --no-relax
endif
-KBUILD_AFLAGS_MODULE += -fPIC
-KBUILD_CFLAGS_MODULE += -fPIC
+
+ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
+KBUILD_CFLAGS_MODULE += -mcmodel=medany
+endif
export BITS
ifeq ($(CONFIG_ARCH_RV64I),y)
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 609198cb1163..4a2729f5ca3f 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -2,6 +2,7 @@
/* Copyright (c) 2018-2019 SiFive, Inc */
#include "fu540-c000.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/* Clock frequency (in Hz) of the PCB crystal for rtcclk */
#define RTCCLK_FREQ 1000000
@@ -41,6 +42,10 @@
clock-frequency = <RTCCLK_FREQ>;
clock-output-names = "rtcclk";
};
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpio 10 GPIO_ACTIVE_LOW>;
+ };
};
&uart0 {
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index e2ff95cb3390..c8f084203067 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -15,6 +15,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
CONFIG_SOC_SIFIVE=y
+CONFIG_SOC_VIRT=y
CONFIG_SMP=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -30,7 +31,6 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NETLINK_DIAG=y
CONFIG_NET_9P=y
-CONFIG_NET_9P_VIRTIO=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
@@ -38,15 +38,12 @@ CONFIG_PCIE_XILINX=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_VIRTIO=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_NETDEVICES=y
-CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
@@ -57,15 +54,13 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_HVC_RISCV_SBI=y
-CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_SPI=y
CONFIG_SPI_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_POWER_RESET=y
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
-CONFIG_DRM_VIRTIO_GPU=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
@@ -78,12 +73,7 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_MMC=y
CONFIG_MMC_SPI=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_INPUT=y
-CONFIG_VIRTIO_MMIO=y
-CONFIG_RPMSG_CHAR=y
-CONFIG_RPMSG_VIRTIO=y
+CONFIG_RTC_CLASS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
@@ -98,7 +88,6 @@ CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_CRYPTO_USER_API_HASH=y
-CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_PAGEALLOC=y
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index eb519407c841..a844920a261f 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -14,6 +14,7 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
+CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
CONFIG_MODULES=y
@@ -30,7 +31,6 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NETLINK_DIAG=y
CONFIG_NET_9P=y
-CONFIG_NET_9P_VIRTIO=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
@@ -38,15 +38,12 @@ CONFIG_PCIE_XILINX=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_VIRTIO=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_NETDEVICES=y
-CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
@@ -57,13 +54,11 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_HVC_RISCV_SBI=y
-CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_VIRTIO=y
# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_POWER_RESET=y
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
-CONFIG_DRM_VIRTIO_GPU=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
@@ -74,13 +69,7 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_INPUT=y
-CONFIG_VIRTIO_MMIO=y
-CONFIG_RPMSG_CHAR=y
-CONFIG_RPMSG_VIRTIO=y
-CONFIG_SIFIVE_PLIC=y
+CONFIG_RTC_CLASS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
@@ -95,7 +84,6 @@ CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_CRYPTO_USER_API_HASH=y
-CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_PAGEALLOC=y
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 42347d0981e7..49350c8bd7b0 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -28,13 +28,6 @@ static inline int syscall_get_nr(struct task_struct *task,
return regs->a7;
}
-static inline void syscall_set_nr(struct task_struct *task,
- struct pt_regs *regs,
- int sysno)
-{
- regs->a7 = sysno;
-}
-
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index bad4d85b5e91..208702d8c18e 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -229,19 +229,12 @@ check_syscall_nr:
li t0, __NR_syscalls
la s0, sys_ni_syscall
/*
- * The tracer can change syscall number to valid/invalid value.
- * We use syscall_set_nr helper in syscall_trace_enter thus we
- * cannot trust the current value in a7 and have to reload from
- * the current task pt_regs.
- */
- REG_L a7, PT_A7(sp)
- /*
* Syscall number held in a7.
* If syscall number is above allowed value, redirect to ni_syscall.
*/
bge a7, t0, 1f
/*
- * Check if syscall is rejected by tracer or seccomp, i.e., a7 == -1.
+ * Check if syscall is rejected by tracer, i.e., a7 == -1.
* If yes, we pretend it was executed.
*/
li t1, -1
@@ -334,6 +327,7 @@ work_resched:
handle_syscall_trace_enter:
move a0, sp
call do_syscall_trace_enter
+ move t0, a0
REG_L a0, PT_A0(sp)
REG_L a1, PT_A1(sp)
REG_L a2, PT_A2(sp)
@@ -342,6 +336,7 @@ handle_syscall_trace_enter:
REG_L a5, PT_A5(sp)
REG_L a6, PT_A6(sp)
REG_L a7, PT_A7(sp)
+ bnez t0, ret_from_syscall_rejected
j check_syscall_nr
handle_syscall_trace_exit:
move a0, sp
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index b7401858d872..8bbe5dbe1341 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -8,6 +8,10 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/moduleloader.h>
+#include <linux/vmalloc.h>
+#include <linux/sizes.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
{
@@ -386,3 +390,15 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
return 0;
}
+
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+#define VMALLOC_MODULE_START \
+ max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START)
+void *module_alloc(unsigned long size)
+{
+ return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START,
+ VMALLOC_END, GFP_KERNEL,
+ PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+}
+#endif
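The new module_alloc() above confines 64-bit module mappings to the 2 GiB below the kernel image, which is the reach of the PC-relative auipc-based sequences the medany code model emits. A sketch of the window computation, assuming the kernel sits above 2 GiB and ignoring the PFN alignment the real macro applies:

#define EXAMPLE_SZ_2G	(2UL << 30)

unsigned long module_region_start(unsigned long kernel_end,
				  unsigned long vmalloc_start)
{
	unsigned long lo = kernel_end - EXAMPLE_SZ_2G;	/* lowest reachable */

	/* never extend below the start of the vmalloc area */
	return lo > vmalloc_start ? lo : vmalloc_start;
}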
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 407464201b91..444dc7b0fd78 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -148,21 +148,19 @@ long arch_ptrace(struct task_struct *child, long request,
* Allows PTRACE_SYSCALL to work. These are called from entry.S in
* {handle,ret_from}_syscall.
*/
-__visible void do_syscall_trace_enter(struct pt_regs *regs)
+__visible int do_syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (tracehook_report_syscall_entry(regs))
- syscall_set_nr(current, regs, -1);
+ return -1;
/*
* Do the secure computing after ptrace; failures should be fast.
* If this fails we might have return value in a0 from seccomp
* (via SECCOMP_RET_ERRNO/TRACE).
*/
- if (secure_computing() == -1) {
- syscall_set_nr(current, regs, -1);
- return;
- }
+ if (secure_computing() == -1)
+ return -1;
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
@@ -170,6 +168,7 @@ __visible void do_syscall_trace_enter(struct pt_regs *regs)
#endif
audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
+ return 0;
}
__visible void do_syscall_trace_exit(struct pt_regs *regs)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 965a8cf4829c..fab855963c73 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -131,7 +131,7 @@ void __init setup_bootmem(void)
for_each_memblock(memory, reg) {
phys_addr_t end = reg->base + reg->size;
- if (reg->base <= vmlinux_end && vmlinux_end <= end) {
+ if (reg->base <= vmlinux_start && vmlinux_end <= end) {
mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
/*
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 137a3920ca36..6d7c3b7e9281 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -752,6 +752,12 @@ static inline int pmd_write(pmd_t pmd)
return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+ return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
+}
+
static inline int pmd_dirty(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index bc61ea18e88d..60716d18ce5a 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -424,7 +424,7 @@ static void zpci_map_resources(struct pci_dev *pdev)
if (zpci_use_mio(zdev))
pdev->resource[i].start =
- (resource_size_t __force) zdev->bars[i].mio_wb;
+ (resource_size_t __force) zdev->bars[i].mio_wt;
else
pdev->resource[i].start = (resource_size_t __force)
pci_iomap_range_fh(pdev, i, 0, 0);
@@ -531,7 +531,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
flags |= IORESOURCE_MEM_64;
if (zpci_use_mio(zdev))
- addr = (unsigned long) zdev->bars[i].mio_wb;
+ addr = (unsigned long) zdev->bars[i].mio_wt;
else
addr = ZPCI_ADDR(entry);
size = 1UL << zdev->bars[i].size;
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 0ff86391f77d..e98304d0219e 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1508,6 +1508,7 @@ static int vector_set_coalesce(struct net_device *netdev,
}
static const struct ethtool_ops vector_net_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS,
.get_drvinfo = vector_net_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 94df0868804b..513a55562d75 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -194,9 +194,10 @@ avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1)
sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1)
sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1)
+adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1)
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr)
KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index b69e00bf20b8..8c2e9eadee8a 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -11,6 +11,7 @@ avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no)
sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no)
sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no)
+adx_supported := $(call as-instr,adox %r10$(comma)%r10,yes,no)
obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
@@ -39,7 +40,11 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
-obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
+
+# These modules require the assembler to support ADX.
+ifeq ($(adx_supported),yes)
+ obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
+endif
# These modules require assembler to support AVX.
ifeq ($(avx_supported),yes)
diff --git a/arch/x86/include/asm/io_bitmap.h b/arch/x86/include/asm/io_bitmap.h
index 02c6ef8f7667..07344d82e88e 100644
--- a/arch/x86/include/asm/io_bitmap.h
+++ b/arch/x86/include/asm/io_bitmap.h
@@ -19,7 +19,14 @@ struct task_struct;
void io_bitmap_share(struct task_struct *tsk);
void io_bitmap_exit(void);
-void tss_update_io_bitmap(void);
+void native_tss_update_io_bitmap(void);
+
+#ifdef CONFIG_PARAVIRT_XXL
+#include <asm/paravirt.h>
+#else
+#define tss_update_io_bitmap native_tss_update_io_bitmap
+#endif
+
#else
static inline void io_bitmap_share(struct task_struct *tsk) { }
static inline void io_bitmap_exit(void) { }
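The header change above follows the usual paravirt pattern: the native implementation keeps a distinct name, and the generic name either aliases it directly (no CONFIG_PARAVIRT_XXL) or dispatches through the op table so a hypervisor can hook it. A reduced sketch with illustrative names:

static void native_impl(void)
{
	/* program the hardware TSS I/O bitmap directly */
}

struct example_cpu_ops {
	void (*update_io_bitmap)(void);
};

static struct example_cpu_ops ops = {
	.update_io_bitmap = native_impl,
};

void update_io_bitmap_dispatch(void)
{
	ops.update_io_bitmap();	/* Xen swaps in xen_update_io_bitmap */
}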
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 86e7317eb31f..694d8daf4983 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -295,6 +295,13 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}
+#ifdef CONFIG_X86_IOPL_IOPERM
+static inline void tss_update_io_bitmap(void)
+{
+ PVOP_VCALL0(cpu.update_io_bitmap);
+}
+#endif
+
static inline void paravirt_activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 84812964d3dd..732f62e04ddb 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -140,6 +140,10 @@ struct pv_cpu_ops {
void (*load_sp0)(unsigned long sp0);
+#ifdef CONFIG_X86_IOPL_IOPERM
+ void (*update_io_bitmap)(void);
+#endif
+
void (*wbinvd)(void);
/* cpuid emulation, mostly so that caps bits can be disabled */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 52c9bfbbdb2a..4cdb123ff66a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -445,7 +445,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
* cpuid bit to be set. We need to ensure that we
* update that bit in this CPU's "cpu_info".
*/
- get_cpu_cap(c);
+ set_cpu_cap(c, X86_FEATURE_OSPKE);
}
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d817f255aed8..6efe0410fb72 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -425,7 +425,29 @@ static void __init sev_map_percpu_data(void)
}
}
+static bool pv_tlb_flush_supported(void)
+{
+ return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+ !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
#ifdef CONFIG_SMP
+
+static bool pv_ipi_supported(void)
+{
+ return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
+}
+
+static bool pv_sched_yield_supported(void)
+{
+ return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
+ !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
static void __send_ipi_mask(const struct cpumask *mask, int vector)
@@ -490,12 +512,12 @@ static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
unsigned int this_cpu = smp_processor_id();
- struct cpumask new_mask;
+ struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
const struct cpumask *local_mask;
- cpumask_copy(&new_mask, mask);
- cpumask_clear_cpu(this_cpu, &new_mask);
- local_mask = &new_mask;
+ cpumask_copy(new_mask, mask);
+ cpumask_clear_cpu(this_cpu, new_mask);
+ local_mask = new_mask;
__send_ipi_mask(local_mask, vector);
}
@@ -575,7 +597,6 @@ static void __init kvm_apf_trap_init(void)
update_intr_gate(X86_TRAP_PF, async_page_fault);
}
-static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
const struct flush_tlb_info *info)
@@ -583,7 +604,7 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
u8 state;
int cpu;
struct kvm_steal_time *src;
- struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
+ struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
cpumask_copy(flushmask, cpumask);
/*
@@ -619,11 +640,10 @@ static void __init kvm_guest_init(void)
pv_ops.time.steal_clock = kvm_steal_clock;
}
- if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
- !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ if (pv_tlb_flush_supported()) {
pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+ pr_info("KVM setup pv remote TLB flush\n");
}
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -632,9 +652,7 @@ static void __init kvm_guest_init(void)
#ifdef CONFIG_SMP
smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
- if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
- !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ if (pv_sched_yield_supported()) {
smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
pr_info("KVM setup pv sched yield\n");
}
@@ -700,7 +718,7 @@ static uint32_t __init kvm_detect(void)
static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
- if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+ if (pv_ipi_supported())
kvm_setup_pv_ipi();
#endif
}
@@ -732,26 +750,31 @@ static __init int activate_jump_labels(void)
}
arch_initcall(activate_jump_labels);
-static __init int kvm_setup_pv_tlb_flush(void)
+static __init int kvm_alloc_cpumask(void)
{
int cpu;
+ bool alloc = false;
if (!kvm_para_available() || nopv)
return 0;
- if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
- !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ if (pv_tlb_flush_supported())
+ alloc = true;
+
+#if defined(CONFIG_SMP)
+ if (pv_ipi_supported())
+ alloc = true;
+#endif
+
+ if (alloc)
for_each_possible_cpu(cpu) {
- zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
+ zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
GFP_KERNEL, cpu_to_node(cpu));
}
- pr_info("KVM setup pv remote TLB flush\n");
- }
return 0;
}
-arch_initcall(kvm_setup_pv_tlb_flush);
+arch_initcall(kvm_alloc_cpumask);
#ifdef CONFIG_PARAVIRT_SPINLOCKS
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 789f5e4f89de..c131ba4e70ef 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -30,6 +30,7 @@
#include <asm/timer.h>
#include <asm/special_insns.h>
#include <asm/tlb.h>
+#include <asm/io_bitmap.h>
/*
* nop stub, which must not clobber anything *including the stack* to
@@ -341,6 +342,10 @@ struct paravirt_patch_template pv_ops = {
.cpu.iret = native_iret,
.cpu.swapgs = native_swapgs,
+#ifdef CONFIG_X86_IOPL_IOPERM
+ .cpu.update_io_bitmap = native_tss_update_io_bitmap,
+#endif
+
.cpu.start_context_switch = paravirt_nop,
.cpu.end_context_switch = paravirt_nop,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 839b5244e3b7..3053c85e0e42 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -374,7 +374,7 @@ static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
/**
* tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
*/
-void tss_update_io_bitmap(void)
+void native_tss_update_io_bitmap(void)
{
struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
struct thread_struct *t = &current->thread;
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 991019d5eee1..1bb4927030af 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -59,6 +59,19 @@ config KVM
If unsure, say N.
+config KVM_WERROR
+ bool "Compile KVM with -Werror"
+ # KASAN may cause the build to fail due to larger frames
+ default y if X86_64 && !KASAN
+ # The dependency on !COMPILE_TEST keeps this from being enabled
+ # blindly in allmodconfig or allyesconfig configurations
+ depends on (X86_64 && !KASAN) || !COMPILE_TEST
+ depends on EXPERT
+ help
+ Add -Werror to the build flags for (and only for) the KVM modules.
+
+ If in doubt, say "N".
+
config KVM_INTEL
tristate "KVM for Intel (and compatible) processors support"
depends on KVM && IA32_FEAT_CTL
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index b19ef421084d..e553f0fdd87d 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y += -Iarch/x86/kvm
+ccflags-$(CONFIG_KVM_WERROR) += -Werror
KVM := ../../../virt/kvm
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ad3f5b178a03..24c0b2ba8fb9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -57,11 +57,13 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_SVM),
{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
+#endif
#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1
@@ -2194,8 +2196,9 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
static int avic_init_vcpu(struct vcpu_svm *svm)
{
int ret;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
- if (!kvm_vcpu_apicv_active(&svm->vcpu))
+ if (!avic || !irqchip_in_kernel(vcpu->kvm))
return 0;
ret = avic_init_backing_page(&svm->vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 63aaf44edd1f..40b1e6138cd5 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -64,11 +64,13 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+#ifdef MODULE
static const struct x86_cpu_id vmx_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_VMX),
{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+#endif
bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);
@@ -7175,6 +7177,7 @@ static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
else
intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
+ /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
}
@@ -7204,6 +7207,20 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
case x86_intercept_outs:
return vmx_check_intercept_io(vcpu, info);
+ case x86_intercept_lgdt:
+ case x86_intercept_lidt:
+ case x86_intercept_lldt:
+ case x86_intercept_ltr:
+ case x86_intercept_sgdt:
+ case x86_intercept_sidt:
+ case x86_intercept_sldt:
+ case x86_intercept_str:
+ if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
+ return X86EMUL_CONTINUE;
+
+ /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
+ break;
+
/* TODO: check more intercepts... */
default:
break;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 359fcd395132..5de200663f51 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7190,15 +7190,15 @@ static void kvm_timer_init(void)
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
- struct cpufreq_policy policy;
+ struct cpufreq_policy *policy;
int cpu;
- memset(&policy, 0, sizeof(policy));
cpu = get_cpu();
- cpufreq_get_policy(&policy, cpu);
- if (policy.cpuinfo.max_freq)
- max_tsc_khz = policy.cpuinfo.max_freq;
+ policy = cpufreq_cpu_get(cpu);
+ if (policy && policy->cpuinfo.max_freq)
+ max_tsc_khz = policy->cpuinfo.max_freq;
put_cpu();
+ cpufreq_cpu_put(policy);
#endif
cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
@@ -7308,12 +7308,12 @@ int kvm_arch_init(void *opaque)
}
if (!ops->cpu_has_kvm_support()) {
- printk(KERN_ERR "kvm: no hardware support\n");
+ pr_err_ratelimited("kvm: no hardware support\n");
r = -EOPNOTSUPP;
goto out;
}
if (ops->disabled_by_bios()) {
- printk(KERN_ERR "kvm: disabled by bios\n");
+ pr_err_ratelimited("kvm: disabled by bios\n");
r = -EOPNOTSUPP;
goto out;
}
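kvm_timer_init() above now borrows the cpufreq policy by reference instead of copying the whole structure onto the stack. A sketch of the get/put discipline it relies on; note that cpufreq_cpu_get() may return NULL, and the reference should only be dropped when one was actually taken:

unsigned int max_freq_khz(int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int khz = 0;

	if (policy) {
		khz = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);	/* drop the reference */
	}
	return khz;
}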
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 64229dad7eab..69309cd56fdf 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -363,13 +363,8 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m,
{
const struct ptdump_range ptdump_ranges[] = {
#ifdef CONFIG_X86_64
-
-#define normalize_addr_shift (64 - (__VIRTUAL_MASK_SHIFT + 1))
-#define normalize_addr(u) ((signed long)((u) << normalize_addr_shift) >> \
- normalize_addr_shift)
-
{0, PTRS_PER_PGD * PGD_LEVEL_MULT / 2},
- {normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT / 2), ~0UL},
+ {GUARD_HOLE_END_ADDR, ~0UL},
#else
{0, ~0UL},
#endif
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index fa8506e76bbe..d19a2edd63cb 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -180,7 +180,7 @@ void efi_sync_low_kernel_mappings(void)
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
- bool bad_size;
+ phys_addr_t pa;
if (!va)
return 0;
@@ -188,16 +188,13 @@ virt_to_phys_or_null_size(void *va, unsigned long size)
if (virt_addr_valid(va))
return virt_to_phys(va);
- /*
- * A fully aligned variable on the stack is guaranteed not to
- * cross a page bounary. Try to catch strings on the stack by
- * checking that 'size' is a power of two.
- */
- bad_size = size > PAGE_SIZE || !is_power_of_2(size);
+ pa = slow_virt_to_phys(va);
- WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
+ /* check if the object crosses a page boundary */
+ if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
+ return 0;
- return slow_virt_to_phys(va);
+ return pa;
}
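The WARN_ON added above is a compact page-crossing test: the first and last byte of the object lie on the same page exactly when their page-frame bits agree, so the XOR masked with PAGE_MASK is nonzero iff the object straddles a boundary. As a standalone predicate (size must be at least 1):

/* PAGE_MASK is ~(PAGE_SIZE - 1), i.e. the page-frame bits */
static int crosses_page(unsigned long pa, unsigned long size)
{
	return ((pa ^ (pa + size - 1)) & PAGE_MASK) != 0;
}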
#define virt_to_phys_or_null(addr) \
@@ -568,85 +565,25 @@ efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
- efi_status_t status;
- u32 phys_tm, phys_tc;
- unsigned long flags;
-
- spin_lock(&rtc_lock);
- spin_lock_irqsave(&efi_runtime_lock, flags);
-
- phys_tm = virt_to_phys_or_null(tm);
- phys_tc = virt_to_phys_or_null(tc);
-
- status = efi_thunk(get_time, phys_tm, phys_tc);
-
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
- spin_unlock(&rtc_lock);
-
- return status;
+ return EFI_UNSUPPORTED;
}
static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
- efi_status_t status;
- u32 phys_tm;
- unsigned long flags;
-
- spin_lock(&rtc_lock);
- spin_lock_irqsave(&efi_runtime_lock, flags);
-
- phys_tm = virt_to_phys_or_null(tm);
-
- status = efi_thunk(set_time, phys_tm);
-
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
- spin_unlock(&rtc_lock);
-
- return status;
+ return EFI_UNSUPPORTED;
}
static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
efi_time_t *tm)
{
- efi_status_t status;
- u32 phys_enabled, phys_pending, phys_tm;
- unsigned long flags;
-
- spin_lock(&rtc_lock);
- spin_lock_irqsave(&efi_runtime_lock, flags);
-
- phys_enabled = virt_to_phys_or_null(enabled);
- phys_pending = virt_to_phys_or_null(pending);
- phys_tm = virt_to_phys_or_null(tm);
-
- status = efi_thunk(get_wakeup_time, phys_enabled,
- phys_pending, phys_tm);
-
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
- spin_unlock(&rtc_lock);
-
- return status;
+ return EFI_UNSUPPORTED;
}
static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
- efi_status_t status;
- u32 phys_tm;
- unsigned long flags;
-
- spin_lock(&rtc_lock);
- spin_lock_irqsave(&efi_runtime_lock, flags);
-
- phys_tm = virt_to_phys_or_null(tm);
-
- status = efi_thunk(set_wakeup_time, enabled, phys_tm);
-
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
- spin_unlock(&rtc_lock);
-
- return status;
+ return EFI_UNSUPPORTED;
}
static unsigned long efi_name_size(efi_char16_t *name)
@@ -658,6 +595,8 @@ static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 *attr, unsigned long *data_size, void *data)
{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
efi_status_t status;
u32 phys_name, phys_vendor, phys_attr;
u32 phys_data_size, phys_data;
@@ -665,14 +604,19 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
spin_lock_irqsave(&efi_runtime_lock, flags);
+ *vnd = *vendor;
+
phys_data_size = virt_to_phys_or_null(data_size);
- phys_vendor = virt_to_phys_or_null(vendor);
+ phys_vendor = virt_to_phys_or_null(vnd);
phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
phys_attr = virt_to_phys_or_null(attr);
phys_data = virt_to_phys_or_null_size(data, *data_size);
- status = efi_thunk(get_variable, phys_name, phys_vendor,
- phys_attr, phys_data_size, phys_data);
+ if (!phys_name || (data && !phys_data))
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(get_variable, phys_name, phys_vendor,
+ phys_attr, phys_data_size, phys_data);
spin_unlock_irqrestore(&efi_runtime_lock, flags);
@@ -683,19 +627,25 @@ static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size, void *data)
{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
u32 phys_name, phys_vendor, phys_data;
efi_status_t status;
unsigned long flags;
spin_lock_irqsave(&efi_runtime_lock, flags);
+ *vnd = *vendor;
+
phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
- phys_vendor = virt_to_phys_or_null(vendor);
+ phys_vendor = virt_to_phys_or_null(vnd);
phys_data = virt_to_phys_or_null_size(data, data_size);
- /* If data_size is > sizeof(u32) we've got problems */
- status = efi_thunk(set_variable, phys_name, phys_vendor,
- attr, data_size, phys_data);
+ if (!phys_name || !phys_data)
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(set_variable, phys_name, phys_vendor,
+ attr, data_size, phys_data);
spin_unlock_irqrestore(&efi_runtime_lock, flags);
@@ -707,6 +657,8 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size,
void *data)
{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
u32 phys_name, phys_vendor, phys_data;
efi_status_t status;
unsigned long flags;
@@ -714,13 +666,17 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
return EFI_NOT_READY;
+ *vnd = *vendor;
+
phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
- phys_vendor = virt_to_phys_or_null(vendor);
+ phys_vendor = virt_to_phys_or_null(vnd);
phys_data = virt_to_phys_or_null_size(data, data_size);
- /* If data_size is > sizeof(u32) we've got problems */
- status = efi_thunk(set_variable, phys_name, phys_vendor,
- attr, data_size, phys_data);
+ if (!phys_name || !phys_data)
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(set_variable, phys_name, phys_vendor,
+ attr, data_size, phys_data);
spin_unlock_irqrestore(&efi_runtime_lock, flags);
@@ -732,39 +688,36 @@ efi_thunk_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
efi_status_t status;
u32 phys_name_size, phys_name, phys_vendor;
unsigned long flags;
spin_lock_irqsave(&efi_runtime_lock, flags);
+ *vnd = *vendor;
+
phys_name_size = virt_to_phys_or_null(name_size);
- phys_vendor = virt_to_phys_or_null(vendor);
+ phys_vendor = virt_to_phys_or_null(vnd);
phys_name = virt_to_phys_or_null_size(name, *name_size);
- status = efi_thunk(get_next_variable, phys_name_size,
- phys_name, phys_vendor);
+ if (!phys_name)
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(get_next_variable, phys_name_size,
+ phys_name, phys_vendor);
spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ *vendor = *vnd;
return status;
}
static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
- efi_status_t status;
- u32 phys_count;
- unsigned long flags;
-
- spin_lock_irqsave(&efi_runtime_lock, flags);
-
- phys_count = virt_to_phys_or_null(count);
- status = efi_thunk(get_next_high_mono_count, phys_count);
-
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
-
- return status;
+ return EFI_UNSUPPORTED;
}
static void
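The repeated u8 buf[24] __aligned(8) plus PTR_ALIGN() pairs above bounce the caller's vendor GUID through a stack slot aligned to the GUID's own size; a power-of-two-sized object aligned to its size cannot cross a page boundary, which keeps the physical-address translation safe. A sketch with PTR_ALIGN open-coded (24 bytes = 16 for the GUID plus up to 8 of alignment slack):

#include <string.h>

void bounce_guid(const void *vendor)
{
	unsigned char buf[24] __attribute__((aligned(8)));
	/* open-coded PTR_ALIGN(buf, 16) */
	void *vnd = (void *)(((unsigned long)buf + 15) & ~15UL);

	memcpy(vnd, vendor, 16);	/* GUID is now 16-byte aligned */
}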
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 79409120a603..507f4fb88fa7 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -72,6 +72,9 @@
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/cpu.h>
+#ifdef CONFIG_X86_IOPL_IOPERM
+#include <asm/io_bitmap.h>
+#endif
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
@@ -837,6 +840,25 @@ static void xen_load_sp0(unsigned long sp0)
this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}
+#ifdef CONFIG_X86_IOPL_IOPERM
+static void xen_update_io_bitmap(void)
+{
+ struct physdev_set_iobitmap iobitmap;
+ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+
+ native_tss_update_io_bitmap();
+
+ iobitmap.bitmap = (uint8_t *)(&tss->x86_tss) +
+ tss->x86_tss.io_bitmap_base;
+ if (tss->x86_tss.io_bitmap_base == IO_BITMAP_OFFSET_INVALID)
+ iobitmap.nr_ports = 0;
+ else
+ iobitmap.nr_ports = IO_BITMAP_BITS;
+
+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
+}
+#endif
+
static void xen_io_delay(void)
{
}
@@ -1047,6 +1069,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.write_idt_entry = xen_write_idt_entry,
.load_sp0 = xen_load_sp0,
+#ifdef CONFIG_X86_IOPL_IOPERM
+ .update_io_bitmap = xen_update_io_bitmap,
+#endif
.io_delay = xen_io_delay,
/* Xen takes care of %gs when switching to usermode for us */
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 09b69a3ed490..f0ff6654af28 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -610,12 +610,13 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
*/
entity = &bfqg->entity;
for_each_entity(entity) {
- bfqg = container_of(entity, struct bfq_group, entity);
- if (bfqg != bfqd->root_group) {
- parent = bfqg_parent(bfqg);
+ struct bfq_group *curr_bfqg = container_of(entity,
+ struct bfq_group, entity);
+ if (curr_bfqg != bfqd->root_group) {
+ parent = bfqg_parent(curr_bfqg);
if (!parent)
parent = bfqd->root_group;
- bfq_group_set_parent(bfqg, parent);
+ bfq_group_set_parent(curr_bfqg, parent);
}
}
diff --git a/block/blk-core.c b/block/blk-core.c
index 089e890ab208..60dc9552ef8d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1663,12 +1663,6 @@ int kblockd_schedule_work(struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
-int kblockd_schedule_work_on(int cpu, struct work_struct *work)
-{
- return queue_work_on(cpu, kblockd_workqueue, work);
-}
-EXPORT_SYMBOL(kblockd_schedule_work_on);
-
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
unsigned long delay)
{
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 3f977c517960..5cc775bdb06a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -412,7 +412,7 @@ void blk_insert_flush(struct request *rq)
*/
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
- blk_mq_request_bypass_insert(rq, false);
+ blk_mq_request_bypass_insert(rq, false, false);
return;
}
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index ca22afd47b3d..856356b1619e 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -361,13 +361,19 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
bool has_sched,
struct request *rq)
{
- /* dispatch flush rq directly */
- if (rq->rq_flags & RQF_FLUSH_SEQ) {
- spin_lock(&hctx->lock);
- list_add(&rq->queuelist, &hctx->dispatch);
- spin_unlock(&hctx->lock);
+ /*
+ * Dispatch flush and passthrough requests directly.
+ *
+ * A passthrough request has to be added to hctx->dispatch directly:
+ * the device may be in a state in which it cannot handle FS requests,
+ * so STS_RESOURCE is always returned and the FS request is added to
+ * hctx->dispatch. A passthrough request may be needed at exactly that
+ * time to fix the problem. If the passthrough request were added to
+ * the scheduler queue instead, there would be no chance to dispatch
+ * it, since requests in hctx->dispatch are prioritized.
+ */
+ if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
return true;
- }
if (has_sched)
rq->rq_flags |= RQF_SORTED;
@@ -391,8 +397,10 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
WARN_ON(e && (rq->tag != -1));
- if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
+ if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+ blk_mq_request_bypass_insert(rq, at_head, false);
goto run;
+ }
if (e && e->type->ops.insert_requests) {
LIST_HEAD(list);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index fbacde454718..586c9d6e904a 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -183,8 +183,8 @@ found_tag:
return tag + tag_offset;
}
-void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
- struct blk_mq_ctx *ctx, unsigned int tag)
+void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
+ unsigned int tag)
{
if (!blk_mq_tag_is_reserved(tags, tag)) {
const int real_tag = tag - tags->nr_reserved_tags;
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 15bc74acb57e..2b8321efb682 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -26,8 +26,8 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
-extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
- struct blk_mq_ctx *ctx, unsigned int tag);
+extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
+ unsigned int tag);
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
struct blk_mq_tags **tags,
unsigned int depth, bool can_grow);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a12b1763508d..d92088dec6c3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -477,9 +477,9 @@ static void __blk_mq_free_request(struct request *rq)
blk_pm_mark_last_busy(rq);
rq->mq_hctx = NULL;
if (rq->tag != -1)
- blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
+ blk_mq_put_tag(hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
- blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
+ blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
blk_mq_sched_restart(hctx);
blk_queue_exit(q);
}
@@ -735,7 +735,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
* merge.
*/
if (rq->rq_flags & RQF_DONTPREP)
- blk_mq_request_bypass_insert(rq, false);
+ blk_mq_request_bypass_insert(rq, false, false);
else
blk_mq_sched_insert_request(rq, true, false, false);
}
@@ -1286,7 +1286,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
q->mq_ops->commit_rqs(hctx);
spin_lock(&hctx->lock);
- list_splice_init(list, &hctx->dispatch);
+ list_splice_tail_init(list, &hctx->dispatch);
spin_unlock(&hctx->lock);
/*
@@ -1677,12 +1677,16 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
* Should only be used carefully, when the caller knows we want to
* bypass a potential IO scheduler on the target device.
*/
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+ bool run_queue)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
spin_lock(&hctx->lock);
- list_add_tail(&rq->queuelist, &hctx->dispatch);
+ if (at_head)
+ list_add(&rq->queuelist, &hctx->dispatch);
+ else
+ list_add_tail(&rq->queuelist, &hctx->dispatch);
spin_unlock(&hctx->lock);
if (run_queue)
@@ -1849,7 +1853,7 @@ insert:
if (bypass_insert)
return BLK_STS_RESOURCE;
- blk_mq_request_bypass_insert(rq, run_queue);
+ blk_mq_request_bypass_insert(rq, false, run_queue);
return BLK_STS_OK;
}
@@ -1876,7 +1880,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
- blk_mq_request_bypass_insert(rq, true);
+ blk_mq_request_bypass_insert(rq, false, true);
else if (ret != BLK_STS_OK)
blk_mq_end_request(rq, ret);
@@ -1910,7 +1914,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
if (ret != BLK_STS_OK) {
if (ret == BLK_STS_RESOURCE ||
ret == BLK_STS_DEV_RESOURCE) {
- blk_mq_request_bypass_insert(rq,
+ blk_mq_request_bypass_insert(rq, false,
list_empty(list));
break;
}
@@ -3398,7 +3402,6 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
}
static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
unsigned long ret = 0;
@@ -3431,7 +3434,6 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
}
static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
struct hrtimer_sleeper hs;
@@ -3451,7 +3453,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
if (q->poll_nsec > 0)
nsecs = q->poll_nsec;
else
- nsecs = blk_mq_poll_nsecs(q, hctx, rq);
+ nsecs = blk_mq_poll_nsecs(q, rq);
if (!nsecs)
return false;
@@ -3506,7 +3508,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
return false;
}
- return blk_mq_poll_hybrid_sleep(q, hctx, rq);
+ return blk_mq_poll_hybrid_sleep(q, rq);
}
/**
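The new at_head flag threaded through blk_mq_request_bypass_insert() just selects between head and tail insertion on hctx->dispatch, i.e. between list_add() and list_add_tail(). A self-contained user-space sketch of that flag-driven insert, assuming nothing beyond a textbook circular doubly linked list:

#include <stdio.h>

struct node {
	struct node *prev, *next;
	int tag;
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void __list_insert(struct node *n, struct node *prev, struct node *next)
{
	next->prev = n;
	n->next = next;
	n->prev = prev;
	prev->next = n;
}

/* Mirrors blk_mq_request_bypass_insert(): one boolean picks between
 * list_add() (head) and list_add_tail() (tail) on the dispatch list. */
static void bypass_insert(struct node *dispatch, struct node *rq, int at_head)
{
	if (at_head)
		__list_insert(rq, dispatch, dispatch->next);	/* list_add */
	else
		__list_insert(rq, dispatch->prev, dispatch);	/* list_add_tail */
}

int main(void)
{
	struct node dispatch, a = { .tag = 1 }, b = { .tag = 2 };
	struct node *p;

	list_init(&dispatch);
	bypass_insert(&dispatch, &a, 0);	/* tail: normal request */
	bypass_insert(&dispatch, &b, 1);	/* head: urgent requeue */
	for (p = dispatch.next; p != &dispatch; p = p->next)
		printf("tag %d\n", p->tag);	/* prints 2 then 1 */
	return 0;
}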
diff --git a/block/blk-mq.h b/block/blk-mq.h
index eaaca8fc1c28..10bfdfb494fa 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -66,7 +66,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
*/
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool at_head);
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+ bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
@@ -199,7 +200,7 @@ static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
- blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
+ blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
rq->tag = -1;
if (rq->rq_flags & RQF_MQ_INFLIGHT) {
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index b5516b04ffc0..6e9ec6e3fe47 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -55,12 +55,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
}
#endif
+static bool acpi_no_watchdog;
+
static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
{
const struct acpi_table_wdat *wdat = NULL;
acpi_status status;
- if (acpi_disabled)
+ if (acpi_disabled || acpi_no_watchdog)
return NULL;
status = acpi_get_table(ACPI_SIG_WDAT, 0,
@@ -88,6 +90,14 @@ bool acpi_has_watchdog(void)
}
EXPORT_SYMBOL_GPL(acpi_has_watchdog);
+/* ACPI watchdog can be disabled on boot command line */
+static int __init disable_acpi_watchdog(char *str)
+{
+ acpi_no_watchdog = true;
+ return 1;
+}
+__setup("acpi_no_watchdog", disable_acpi_watchdog);
+
void __init acpi_watchdog_init(void)
{
const struct acpi_wdat_entry *entries;
@@ -126,12 +136,11 @@ void __init acpi_watchdog_init(void)
gas = &entries[i].register_region;
res.start = gas->address;
+ res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
res.flags = IORESOURCE_MEM;
- res.end = res.start + ALIGN(gas->access_width, 4) - 1;
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
res.flags = IORESOURCE_IO;
- res.end = res.start + gas->access_width - 1;
} else {
pr_warn("Unsupported address space: %u\n",
gas->space_id);
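The acpi_watchdog fix works because GAS access_width is an encoding, not a byte count, and ACPICA's ACPI_ACCESS_BYTE_WIDTH() decodes it as 1 << (access_width - 1). The sketch below replays the old and new end-address arithmetic: the old MEM path was wrong for byte, word, and qword widths, and the old IO path was wrong for dword and qword:

#include <stdio.h>

/* GAS access_width encoding: 1 = byte, 2 = word, 3 = dword, 4 = qword.
 * ACPICA decodes it to a byte count as: */
#define ACPI_ACCESS_BYTE_WIDTH(aw)	(1 << ((aw) - 1))
/* The old code's alignment helper: */
#define ALIGN(x, a)			(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int aw;

	for (aw = 1; aw <= 4; aw++)
		printf("aw=%u  old MEM=%u  old IO=%u  fixed=%u bytes\n",
		       aw,
		       ALIGN(aw, 4),		/* wrong for 1, 2, 4 */
		       aw,			/* wrong for 3, 4 */
		       ACPI_ACCESS_BYTE_WIDTH(aw));
	return 0;
}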
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a6b2082c24f8..e47c8a4c83db 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -5228,6 +5228,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
binder_dev = container_of(filp->private_data,
struct binder_device, miscdev);
}
+ refcount_inc(&binder_dev->ref);
proc->context = &binder_dev->context;
binder_alloc_init(&proc->alloc);
@@ -5405,6 +5406,7 @@ static int binder_node_release(struct binder_node *node, int refs)
static void binder_deferred_release(struct binder_proc *proc)
{
struct binder_context *context = proc->context;
+ struct binder_device *device;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
@@ -5421,6 +5423,12 @@ static void binder_deferred_release(struct binder_proc *proc)
context->binder_context_mgr_node = NULL;
}
mutex_unlock(&context->context_mgr_node_lock);
+ device = container_of(proc->context, struct binder_device, context);
+ if (refcount_dec_and_test(&device->ref)) {
+ kfree(context->name);
+ kfree(device);
+ }
+ proc->context = NULL;
binder_inner_proc_lock(proc);
/*
* Make sure proc stays alive after we
@@ -6077,6 +6085,7 @@ static int __init init_binder_device(const char *name)
binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
binder_device->miscdev.name = name;
+ refcount_set(&binder_device->ref, 1);
binder_device->context.binder_context_mgr_uid = INVALID_UID;
binder_device->context.name = name;
mutex_init(&binder_device->context.context_mgr_node_lock);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index ae991097d14d..283d3cb9c16e 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uidgid.h>
@@ -33,6 +34,7 @@ struct binder_device {
struct miscdevice miscdev;
struct binder_context context;
struct inode *binderfs_inode;
+ refcount_t ref;
};
/**
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index e2580e5316a2..110e41f920c2 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -154,6 +154,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
if (!name)
goto err;
+ refcount_set(&device->ref, 1);
device->binderfs_inode = inode;
device->context.binder_context_mgr_uid = INVALID_UID;
device->context.name = name;
@@ -257,8 +258,10 @@ static void binderfs_evict_inode(struct inode *inode)
ida_free(&binderfs_minors, device->miscdev.minor);
mutex_unlock(&binderfs_minors_mutex);
- kfree(device->context.name);
- kfree(device);
+ if (refcount_dec_and_test(&device->ref)) {
+ kfree(device->context.name);
+ kfree(device);
+ }
}
/**
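The binder/binderfs hunks introduce shared ownership of the device: it is created with one reference, each binder_open() takes another, and whoever drops the count to zero frees the name and the device. A user-space sketch of that refcount_t discipline using C11 atomics (the kernel's refcount_t additionally saturates on overflow/underflow, which this toy version omits):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct device {
	atomic_int ref;
	char *name;
};

static struct device *device_create(const char *name)
{
	struct device *d = malloc(sizeof(*d));

	atomic_init(&d->ref, 1);	/* creator holds the first reference */
	d->name = strdup(name);
	return d;
}

static void device_get(struct device *d)
{
	atomic_fetch_add(&d->ref, 1);	/* e.g. binder_open() */
}

static void device_put(struct device *d)
{
	/* Free only when the last holder drops its reference, as the
	 * binderfs_evict_inode()/binder_deferred_release() pair now does. */
	if (atomic_fetch_sub(&d->ref, 1) == 1) {
		free(d->name);
		free(d);
	}
}

int main(void)
{
	struct device *d = device_create("binder0");

	device_get(d);	/* an opener */
	device_put(d);	/* inode eviction */
	device_put(d);	/* last opener closes: freed here */
	return 0;
}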
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 8db8c0fb5e2d..7af74fb450a0 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -91,7 +91,7 @@
#ifdef GENERAL_DEBUG
#define PRINTK(args...) printk(args)
#else
-#define PRINTK(args...)
+#define PRINTK(args...) do {} while (0)
#endif /* GENERAL_DEBUG */
#ifdef EXTRA_DEBUG
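The nicstar change swaps an empty PRINTK() expansion for do {} while (0), the standard idiom for a statement-like macro. A small demonstration of why, assuming nothing beyond standard C:

#include <stdio.h>

#define DEBUG 0

#if DEBUG
#define PRINTK(...)	printf(__VA_ARGS__)
#else
/* An empty expansion leaves a bare ';' behind, which trips
 * -Wempty-body style warnings and reads badly in unbraced if/else
 * chains; do {} while (0) keeps the macro a single statement that
 * still demands its trailing semicolon. */
#define PRINTK(...)	do {} while (0)
#endif

int main(void)
{
	int err = 1;

	if (err)
		PRINTK("error %d\n", err);
	else
		printf("ok\n");
	return 0;
}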
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index b8313a04422d..48efa7a047f3 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -111,7 +111,7 @@ config CFAG12864B
If unsure, say N.
config CFAG12864B_RATE
- int "Refresh rate (hertz)"
+ int "Refresh rate (hertz)"
depends on CFAG12864B
default "20"
---help---
@@ -329,7 +329,7 @@ config PANEL_LCD_PROTO
config PANEL_LCD_PIN_E
depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
+ int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
range -17 17
default 14
---help---
@@ -344,7 +344,7 @@ config PANEL_LCD_PIN_E
config PANEL_LCD_PIN_RS
depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
+ int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
range -17 17
default 17
---help---
@@ -359,7 +359,7 @@ config PANEL_LCD_PIN_RS
config PANEL_LCD_PIN_RW
depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
+ int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
range -17 17
default 16
---help---
@@ -374,7 +374,7 @@ config PANEL_LCD_PIN_RW
config PANEL_LCD_PIN_SCL
depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
- int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
+ int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
range -17 17
default 1
---help---
@@ -389,7 +389,7 @@ config PANEL_LCD_PIN_SCL
config PANEL_LCD_PIN_SDA
depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
- int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
+ int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
range -17 17
default 2
---help---
@@ -404,12 +404,12 @@ config PANEL_LCD_PIN_SDA
config PANEL_LCD_PIN_BL
depends on PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
+ int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
range -17 17
default 0
---help---
This describes the number of the parallel port pin to which the LCD 'BL' signal
- has been connected. It can be :
+ has been connected. It can be :
0 : no connection (eg: connected to ground)
1..17 : directly connected to any of these pins on the DB25 plug
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 874c259a8829..c0da3820454b 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -88,7 +88,7 @@ struct charlcd_priv {
int len;
} esc_seq;
- unsigned long long drvdata[0];
+ unsigned long long drvdata[];
};
#define charlcd_to_priv(p) container_of(p, struct charlcd_priv, lcd)
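The drvdata[0] to drvdata[] change converts a GNU zero-length array into a C99 flexible array member, the standard spelling that compilers and bounds checkers understand. The allocation then carries the trailing storage in the same block, sized in the kernel by struct_size(); below is a user-space sketch with a simplified, overflow-unchecked stand-in for struct_size():

#include <stdio.h>
#include <stdlib.h>

struct priv {
	int len;
	unsigned long long drvdata[];	/* C99 flexible array member */
};

/* Rough equivalent of the kernel's struct_size(p, drvdata, n),
 * without its overflow checking. */
#define STRUCT_SIZE(type, member, n) \
	(sizeof(type) + (n) * sizeof(((type *)0)->member[0]))

int main(void)
{
	size_t n = 4;
	struct priv *p = malloc(STRUCT_SIZE(struct priv, drvdata, n));

	if (!p)
		return 1;
	p->len = (int)n;
	for (size_t i = 0; i < n; i++)
		p->drvdata[i] = i;	/* trailing storage, one allocation */
	printf("%d elems, last=%llu\n", p->len, p->drvdata[n - 1]);
	free(p);
	return 0;
}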
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index efb928e25aef..1cce409ce5ca 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -356,7 +356,6 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
const struct of_device_id *match;
const struct img_ascii_lcd_config *cfg;
struct img_ascii_lcd_ctx *ctx;
- struct resource *res;
int err;
match = of_match_device(img_ascii_lcd_matches, &pdev->dev);
@@ -378,8 +377,7 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
&ctx->offset))
return -EINVAL;
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base))
return PTR_ERR(ctx->base);
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index fb8b7990f6fd..befc2722dbfc 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -718,6 +718,8 @@ static void __device_links_queue_sync_state(struct device *dev,
{
struct device_link *link;
+ if (!dev_has_sync_state(dev))
+ return;
if (dev->state_synced)
return;
@@ -745,25 +747,31 @@ static void __device_links_queue_sync_state(struct device *dev,
/**
* device_links_flush_sync_list - Call sync_state() on a list of devices
* @list: List of devices to call sync_state() on
+ * @dont_lock_dev: Device for which lock is already held by the caller
*
* Calls sync_state() on all the devices that have been queued for it. This
- * function is used in conjunction with __device_links_queue_sync_state().
+ * function is used in conjunction with __device_links_queue_sync_state(). The
+ * @dont_lock_dev parameter is useful when this function is called from a
+ * context where a device lock is already held.
*/
-static void device_links_flush_sync_list(struct list_head *list)
+static void device_links_flush_sync_list(struct list_head *list,
+ struct device *dont_lock_dev)
{
struct device *dev, *tmp;
list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
list_del_init(&dev->links.defer_sync);
- device_lock(dev);
+ if (dev != dont_lock_dev)
+ device_lock(dev);
if (dev->bus->sync_state)
dev->bus->sync_state(dev);
else if (dev->driver && dev->driver->sync_state)
dev->driver->sync_state(dev);
- device_unlock(dev);
+ if (dev != dont_lock_dev)
+ device_unlock(dev);
put_device(dev);
}
@@ -801,7 +809,7 @@ void device_links_supplier_sync_state_resume(void)
out:
device_links_write_unlock();
- device_links_flush_sync_list(&sync_list);
+ device_links_flush_sync_list(&sync_list, NULL);
}
static int sync_state_resume_initcall(void)
@@ -813,7 +821,7 @@ late_initcall(sync_state_resume_initcall);
static void __device_links_supplier_defer_sync(struct device *sup)
{
- if (list_empty(&sup->links.defer_sync))
+ if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
list_add_tail(&sup->links.defer_sync, &deferred_sync);
}
@@ -865,6 +873,11 @@ void device_links_driver_bound(struct device *dev)
driver_deferred_probe_add(link->consumer);
}
+ if (defer_sync_state_count)
+ __device_links_supplier_defer_sync(dev);
+ else
+ __device_links_queue_sync_state(dev, &sync_list);
+
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
@@ -883,7 +896,7 @@ void device_links_driver_bound(struct device *dev)
device_links_write_unlock();
- device_links_flush_sync_list(&sync_list);
+ device_links_flush_sync_list(&sync_list, dev);
}
static void device_link_drop_managed(struct device_link *link)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 7fa654f1288b..b5ce7b085795 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -363,10 +363,10 @@ static void setup_pdev_dma_masks(struct platform_device *pdev)
{
if (!pdev->dev.coherent_dma_mask)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- if (!pdev->dma_mask)
- pdev->dma_mask = DMA_BIT_MASK(32);
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dma_mask;
+ if (!pdev->dev.dma_mask) {
+ pdev->platform_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &pdev->platform_dma_mask;
+ }
};
/**
@@ -662,20 +662,8 @@ struct platform_device *platform_device_register_full(
pdev->dev.of_node_reused = pdevinfo->of_node_reused;
if (pdevinfo->dma_mask) {
- /*
- * This memory isn't freed when the device is put,
- * I don't have a nice idea for that though. Conceptually
- * dma_mask in struct device should not be a pointer.
- * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
- */
- pdev->dev.dma_mask =
- kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
- if (!pdev->dev.dma_mask)
- goto err;
-
- kmemleak_ignore(pdev->dev.dma_mask);
-
- *pdev->dev.dma_mask = pdevinfo->dma_mask;
+ pdev->platform_dma_mask = pdevinfo->dma_mask;
+ pdev->dev.dma_mask = &pdev->platform_dma_mask;
pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
}
@@ -700,7 +688,6 @@ struct platform_device *platform_device_register_full(
if (ret) {
err:
ACPI_COMPANION_SET(&pdev->dev, NULL);
- kfree(pdev->dev.dma_mask);
platform_device_put(pdev);
return ERR_PTR(ret);
}
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 0b081dee1e95..de8d3543e8fe 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -608,6 +608,13 @@ static void software_node_release(struct kobject *kobj)
{
struct swnode *swnode = kobj_to_swnode(kobj);
+ if (swnode->parent) {
+ ida_simple_remove(&swnode->parent->child_ids, swnode->id);
+ list_del(&swnode->entry);
+ } else {
+ ida_simple_remove(&swnode_root_ids, swnode->id);
+ }
+
if (swnode->allocated) {
property_entries_free(swnode->node->properties);
kfree(swnode->node);
@@ -773,13 +780,6 @@ void fwnode_remove_software_node(struct fwnode_handle *fwnode)
if (!swnode)
return;
- if (swnode->parent) {
- ida_simple_remove(&swnode->parent->child_ids, swnode->id);
- list_del(&swnode->entry);
- } else {
- ida_simple_remove(&swnode_root_ids, swnode->id);
- }
-
kobject_put(&swnode->kobj);
}
EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index bc837862b767..62b660821dbc 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -14,9 +14,6 @@
#include <linux/fault-inject.h>
struct nullb_cmd {
- struct list_head list;
- struct llist_node ll_list;
- struct __call_single_data csd;
struct request *rq;
struct bio *bio;
unsigned int tag;
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 16510795e377..133060431dbd 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1518,8 +1518,6 @@ static int setup_commands(struct nullb_queue *nq)
for (i = 0; i < nq->queue_depth; i++) {
cmd = &nq->cmds[i];
- INIT_LIST_HEAD(&cmd->list);
- cmd->ll_list.next = NULL;
cmd->tag = -1U;
}
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 117cfc8cd05a..cda5cf917e9a 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -276,7 +276,7 @@ static const struct block_device_operations pcd_bdops = {
.release = pcd_block_release,
.ioctl = pcd_block_ioctl,
#ifdef CONFIG_COMPAT
- .ioctl = blkdev_compat_ptr_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
#endif
.check_events = pcd_block_check_events,
};
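The pcd fix (and the identical gdrom one further down) is subtler than it looks: initializing .ioctl twice is legal C, and the second designated initializer silently overrides the first (at most a -Woverride-init warning), so .compat_ioctl stayed NULL and .ioctl pointed at the compat handler. A compilable illustration:

#include <stdio.h>

struct ops {
	void (*ioctl)(void);
	void (*compat_ioctl)(void);
};

static void native(void) { puts("native ioctl"); }
static void compat(void) { puts("compat ioctl"); }

/* Duplicate designated initializer: last one wins, silently. */
static const struct ops buggy = {
	.ioctl = native,
	.ioctl = compat,	/* meant .compat_ioctl */
};

static const struct ops fixed = {
	.ioctl = native,
	.compat_ioctl = compat,
};

int main(void)
{
	buggy.ioctl();				/* prints "compat ioctl" */
	printf("buggy.compat_ioctl = %p\n", (void *)buggy.compat_ioctl);
	fixed.ioctl();				/* prints "native ioctl" */
	fixed.compat_ioctl();			/* prints "compat ioctl" */
	return 0;
}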
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 54158766334b..0736248999b0 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -245,13 +245,20 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
- blk_mq_stop_hw_queue(hctx);
+ /* Don't stop the queue if -ENOMEM: we may have failed to
+ * bounce the buffer due to global resource outage.
+ */
+ if (err == -ENOSPC)
+ blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
- /* Out of mem doesn't actually happen, since we fall back
- * to direct descriptors */
- if (err == -ENOMEM || err == -ENOSPC)
+ switch (err) {
+ case -ENOSPC:
return BLK_STS_DEV_RESOURCE;
- return BLK_STS_IOERR;
+ case -ENOMEM:
+ return BLK_STS_RESOURCE;
+ default:
+ return BLK_STS_IOERR;
+ }
}
if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
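The virtio_blk rework distinguishes a full virtqueue (-ENOSPC: stop the hw queue and report a device-level resource shortage) from a transient host allocation failure (-ENOMEM: ask the block core to retry without stopping the queue). A sketch of that mapping with the virtqueue call stubbed out; the status names are stand-ins for the BLK_STS_* values:

#include <errno.h>
#include <stdio.h>

enum blk_status { STS_OK, STS_RESOURCE, STS_DEV_RESOURCE, STS_IOERR };

/* Mirrors the fixed virtio_queue_rq() mapping: stop the hw queue only
 * for -ENOSPC (ring genuinely full, a later kick restarts it); -ENOMEM
 * is transient, so retry without stopping the queue. */
static enum blk_status map_virtqueue_err(int err, int *stop_queue)
{
	*stop_queue = (err == -ENOSPC);
	switch (err) {
	case -ENOSPC:
		return STS_DEV_RESOURCE;
	case -ENOMEM:
		return STS_RESOURCE;
	default:
		return STS_IOERR;
	}
}

int main(void)
{
	int errs[] = { -ENOSPC, -ENOMEM, -EIO };
	int stop;

	for (unsigned int i = 0; i < sizeof(errs) / sizeof(errs[0]); i++)
		printf("err=%d -> sts=%d stop=%d\n", errs[i],
		       map_virtqueue_err(errs[i], &stop), stop);
	return 0;
}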
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e2ad6bba2281..9df516a56bb2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -213,6 +213,7 @@ struct blkfront_info
struct blk_mq_tag_set tag_set;
struct blkfront_ring_info *rinfo;
unsigned int nr_rings;
+ unsigned int rinfo_size;
/* Save uncomplete reqs and bios for migration. */
struct list_head requests;
struct bio_list bio_list;
@@ -259,6 +260,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);
+#define for_each_rinfo(info, ptr, idx) \
+ for ((ptr) = (info)->rinfo, (idx) = 0; \
+ (idx) < (info)->nr_rings; \
+ (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
+
+static inline struct blkfront_ring_info *
+get_rinfo(const struct blkfront_info *info, unsigned int i)
+{
+ BUG_ON(i >= info->nr_rings);
+ return (void *)info->rinfo + i * info->rinfo_size;
+}
+
static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
unsigned long free = rinfo->shadow_free;
@@ -883,8 +896,7 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
struct blkfront_info *info = hctx->queue->queuedata;
struct blkfront_ring_info *rinfo = NULL;
- BUG_ON(info->nr_rings <= qid);
- rinfo = &info->rinfo[qid];
+ rinfo = get_rinfo(info, qid);
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
if (RING_FULL(&rinfo->ring))
@@ -1181,6 +1193,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
static void xlvbd_release_gendisk(struct blkfront_info *info)
{
unsigned int minor, nr_minors, i;
+ struct blkfront_ring_info *rinfo;
if (info->rq == NULL)
return;
@@ -1188,9 +1201,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
/* No more blkif_request(). */
blk_mq_stop_hw_queues(info->rq);
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
-
+ for_each_rinfo(info, rinfo, i) {
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&rinfo->callback);
@@ -1339,6 +1350,7 @@ free_shadow:
static void blkif_free(struct blkfront_info *info, int suspend)
{
unsigned int i;
+ struct blkfront_ring_info *rinfo;
/* Prevent new requests being issued until we fix things up. */
info->connected = suspend ?
@@ -1347,8 +1359,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
if (info->rq)
blk_mq_stop_hw_queues(info->rq);
- for (i = 0; i < info->nr_rings; i++)
- blkif_free_ring(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i)
+ blkif_free_ring(rinfo);
kvfree(info->rinfo);
info->rinfo = NULL;
@@ -1775,6 +1787,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
int err;
unsigned int i, max_page_order;
unsigned int ring_page_order;
+ struct blkfront_ring_info *rinfo;
if (!info)
return -ENODEV;
@@ -1788,9 +1801,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
if (err)
goto destroy_blkring;
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
-
+ for_each_rinfo(info, rinfo, i) {
/* Create shared ring, alloc event channel. */
err = setup_blkring(dev, rinfo);
if (err)
@@ -1815,7 +1826,7 @@ again:
/* We already got the number of queues/rings in _probe */
if (info->nr_rings == 1) {
- err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
+ err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
if (err)
goto destroy_blkring;
} else {
@@ -1837,10 +1848,10 @@ again:
goto abort_transaction;
}
- for (i = 0; i < info->nr_rings; i++) {
+ for_each_rinfo(info, rinfo, i) {
memset(path, 0, pathsize);
snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
- err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
+ err = write_per_ring_nodes(xbt, rinfo, path);
if (err) {
kfree(path);
goto destroy_blkring;
@@ -1868,9 +1879,8 @@ again:
goto destroy_blkring;
}
- for (i = 0; i < info->nr_rings; i++) {
+ for_each_rinfo(info, rinfo, i) {
unsigned int j;
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
for (j = 0; j < BLK_RING_SIZE(info); j++)
rinfo->shadow[j].req.u.rw.id = j + 1;
@@ -1900,6 +1910,7 @@ static int negotiate_mq(struct blkfront_info *info)
{
unsigned int backend_max_queues;
unsigned int i;
+ struct blkfront_ring_info *rinfo;
BUG_ON(info->nr_rings);
@@ -1911,20 +1922,16 @@ static int negotiate_mq(struct blkfront_info *info)
if (!info->nr_rings)
info->nr_rings = 1;
- info->rinfo = kvcalloc(info->nr_rings,
- struct_size(info->rinfo, shadow,
- BLK_RING_SIZE(info)),
- GFP_KERNEL);
+ info->rinfo_size = struct_size(info->rinfo, shadow,
+ BLK_RING_SIZE(info));
+ info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
info->nr_rings = 0;
return -ENOMEM;
}
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo;
-
- rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
INIT_LIST_HEAD(&rinfo->indirect_pages);
INIT_LIST_HEAD(&rinfo->grants);
rinfo->dev_info = info;
@@ -2017,6 +2024,7 @@ static int blkif_recover(struct blkfront_info *info)
int rc;
struct bio *bio;
unsigned int segs;
+ struct blkfront_ring_info *rinfo;
blkfront_gather_backend_features(info);
/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
@@ -2024,9 +2032,7 @@ static int blkif_recover(struct blkfront_info *info)
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
- for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
-
+ for_each_rinfo(info, rinfo, r_index) {
rc = blkfront_setup_indirect(rinfo);
if (rc)
return rc;
@@ -2036,10 +2042,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED;
- for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo;
-
- rinfo = &info->rinfo[r_index];
+ for_each_rinfo(info, rinfo, r_index) {
/* Kick any other new requests queued since we resumed */
kick_pending_request_queues(rinfo);
}
@@ -2072,13 +2075,13 @@ static int blkfront_resume(struct xenbus_device *dev)
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
int err = 0;
unsigned int i, j;
+ struct blkfront_ring_info *rinfo;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
bio_list_init(&info->bio_list);
INIT_LIST_HEAD(&info->requests);
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
struct bio_list merge_bio;
struct blk_shadow *shadow = rinfo->shadow;
@@ -2337,6 +2340,7 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned int binfo;
char *envp[] = { "RESIZE=1", NULL };
int err, i;
+ struct blkfront_ring_info *rinfo;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@@ -2394,8 +2398,8 @@ static void blkfront_connect(struct blkfront_info *info)
"physical-sector-size",
sector_size);
blkfront_gather_backend_features(info);
- for (i = 0; i < info->nr_rings; i++) {
- err = blkfront_setup_indirect(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i) {
+ err = blkfront_setup_indirect(rinfo);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
info->xbdev->otherend);
@@ -2416,8 +2420,8 @@ static void blkfront_connect(struct blkfront_info *info)
/* Kick pending requests. */
info->connected = BLKIF_STATE_CONNECTED;
- for (i = 0; i < info->nr_rings; i++)
- kick_pending_request_queues(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i)
+ kick_pending_request_queues(rinfo);
device_add_disk(&info->xbdev->dev, info->gd, NULL);
@@ -2652,9 +2656,9 @@ static void purge_persistent_grants(struct blkfront_info *info)
{
unsigned int i;
unsigned long flags;
+ struct blkfront_ring_info *rinfo;
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
struct grant *gnt_list_entry, *tmp;
spin_lock_irqsave(&rinfo->ring_lock, flags);
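All of the xen-blkfront churn above serves one idea: rinfo is now an array of variable-sized elements (each carries a shadow[] flexible array sized by BLK_RING_SIZE), so &info->rinfo[i] would step by sizeof(struct blkfront_ring_info) and land mid-element. for_each_rinfo() and get_rinfo() step by the recorded byte stride instead. A user-space sketch of the same stride-based iteration, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct ring {
	int id;
	int shadow[];	/* per-ring array, BLK_RING_SIZE entries */
};

/* calloc(nr, stride) lays the rings out back to back, each 'stride'
 * bytes long, so iterate by the byte stride, not by sizeof(*ptr). */
#define for_each_ring(base, stride, nr, ptr, i) \
	for ((ptr) = (base), (i) = 0; (i) < (nr); \
	     (i)++, (ptr) = (struct ring *)((char *)(ptr) + (stride)))

int main(void)
{
	unsigned int nr = 3, shadow_entries = 8, i;
	size_t stride = sizeof(struct ring) + shadow_entries * sizeof(int);
	struct ring *base = calloc(nr, stride), *r;

	if (!base)
		return 1;
	for_each_ring(base, stride, nr, r, i)
		r->id = (int)i;
	for_each_ring(base, stride, nr, r, i)
		printf("ring %d at offset %zu\n", r->id,
		       (size_t)((char *)r - (char *)base));
	free(base);
	return 0;
}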
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f702c85c81b6..6113fc0a52ae 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1400,7 +1400,7 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
/* 1-wire needs module's internal clocks enabled for reset */
-static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata)
+static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
{
int offset = 0x0c; /* HDQ_CTRL_STATUS */
u16 val;
@@ -1488,7 +1488,7 @@ static void sysc_init_module_quirks(struct sysc *ddata)
return;
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
- ddata->clk_enable_quirk = sysc_clk_enable_quirk_hdq1w;
+ ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w;
return;
}
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 886b2638c730..c51292c2a131 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -519,7 +519,7 @@ static const struct block_device_operations gdrom_bdops = {
.check_events = gdrom_bdops_check_events,
.ioctl = gdrom_bdops_ioctl,
#ifdef CONFIG_COMPAT
- .ioctl = blkdev_compat_ptr_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
#endif
};
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index c78127ccbc0d..638c693e17ad 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -194,7 +194,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
else
io.slave_addr = slave_addr;
- io.irq = platform_get_irq(pdev, 0);
+ io.irq = platform_get_irq_optional(pdev, 0);
if (io.irq > 0)
io.irq_setup = ipmi_std_irq_setup;
else
@@ -378,7 +378,7 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
io.irq = tmp;
io.irq_setup = acpi_gpe_irq_setup;
} else {
- int irq = platform_get_irq(pdev, 0);
+ int irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
io.irq = irq;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index cbe6c94bf158..808874bccf4a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1076,9 +1076,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
pol = policy->last_policy;
} else if (def_gov) {
pol = cpufreq_parse_policy(def_gov->name);
- } else {
- return -ENODATA;
+ /*
+ * In case the default governor is neither "performance"
+ * nor "powersave", fall back to the initial policy
+ * value set by the driver.
+ */
+ if (pol == CPUFREQ_POLICY_UNKNOWN)
+ pol = policy->policy;
}
+ if (pol != CPUFREQ_POLICY_PERFORMANCE &&
+ pol != CPUFREQ_POLICY_POWERSAVE)
+ return -ENODATA;
}
return cpufreq_set_policy(policy, gov, pol);
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index f078b2686418..f2756836093f 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -42,3 +42,14 @@ config CRYPTO_DEV_CHELSIO_TLS
To compile this driver as a module, choose M here: the module
will be called chtls.
+
+config CHELSIO_TLS_DEVICE
+ bool "Chelsio Inline KTLS Offload"
+ depends on CHELSIO_T4
+ depends on TLS_DEVICE
+ select CRYPTO_DEV_CHELSIO
+ default y
+ help
+ This flag enables support for kernel TLS offload on the Chelsio T6
+ crypto accelerator. CONFIG_CHELSIO_TLS_DEVICE can be enabled only
+ if CONFIG_TLS and CONFIG_TLS_DEVICE are enabled.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index a3c05e2f4562..0e9d035927e9 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -3,5 +3,8 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+chcr-objs += chcr_ktls.o
+#endif
chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls/
diff --git a/drivers/crypto/chelsio/chcr_common.h b/drivers/crypto/chelsio/chcr_common.h
new file mode 100644
index 000000000000..33f589cbfba1
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_common.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
+
+#ifndef __CHCR_COMMON_H__
+#define __CHCR_COMMON_H__
+
+#include "cxgb4.h"
+
+#define CHCR_MAX_SALT 4
+#define CHCR_KEYCTX_MAC_KEY_SIZE_128 0
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128 0
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM 2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR 3
+#define CHCR_CPL_TX_SEC_PDU_LEN_64BIT 2
+#define CHCR_SCMD_SEQ_NO_CTRL_64BIT 3
+#define CHCR_SCMD_PROTO_VERSION_TLS 0
+#define CHCR_SCMD_PROTO_VERSION_GENERIC 4
+#define CHCR_SCMD_AUTH_MODE_GHASH 4
+#define AES_BLOCK_LEN 16
+
+enum chcr_state {
+ CHCR_INIT = 0,
+ CHCR_ATTACH,
+ CHCR_DETACH,
+};
+
+struct chcr_dev {
+ spinlock_t lock_chcr_dev; /* chcr dev structure lock */
+ enum chcr_state state;
+ atomic_t inflight;
+ int wqretry;
+ struct delayed_work detach_work;
+ struct completion detach_comp;
+ unsigned char tx_channel_id;
+};
+
+struct uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+ struct chcr_dev dev;
+};
+
+struct ktls_key_ctx {
+ __be32 ctx_hdr;
+ u8 salt[CHCR_MAX_SALT];
+ __be64 iv_to_auth;
+ unsigned char key[TLS_CIPHER_AES_GCM_128_KEY_SIZE +
+ TLS_CIPHER_AES_GCM_256_TAG_SIZE];
+};
+
+/* Crypto key context */
+#define KEY_CONTEXT_CTX_LEN_S 24
+#define KEY_CONTEXT_CTX_LEN_V(x) ((x) << KEY_CONTEXT_CTX_LEN_S)
+
+#define KEY_CONTEXT_SALT_PRESENT_S 10
+#define KEY_CONTEXT_SALT_PRESENT_V(x) ((x) << KEY_CONTEXT_SALT_PRESENT_S)
+#define KEY_CONTEXT_SALT_PRESENT_F KEY_CONTEXT_SALT_PRESENT_V(1U)
+
+#define KEY_CONTEXT_VALID_S 0
+#define KEY_CONTEXT_VALID_V(x) ((x) << KEY_CONTEXT_VALID_S)
+#define KEY_CONTEXT_VALID_F KEY_CONTEXT_VALID_V(1U)
+
+#define KEY_CONTEXT_CK_SIZE_S 6
+#define KEY_CONTEXT_CK_SIZE_V(x) ((x) << KEY_CONTEXT_CK_SIZE_S)
+
+#define KEY_CONTEXT_MK_SIZE_S 2
+#define KEY_CONTEXT_MK_SIZE_V(x) ((x) << KEY_CONTEXT_MK_SIZE_S)
+
+#define KEY_CONTEXT_OPAD_PRESENT_S 11
+#define KEY_CONTEXT_OPAD_PRESENT_V(x) ((x) << KEY_CONTEXT_OPAD_PRESENT_S)
+#define KEY_CONTEXT_OPAD_PRESENT_F KEY_CONTEXT_OPAD_PRESENT_V(1U)
+
+#define FILL_KEY_CTX_HDR(ck_size, mk_size, ctx_len) \
+ htonl(KEY_CONTEXT_MK_SIZE_V(mk_size) | \
+ KEY_CONTEXT_CK_SIZE_V(ck_size) | \
+ KEY_CONTEXT_VALID_F | \
+ KEY_CONTEXT_SALT_PRESENT_F | \
+ KEY_CONTEXT_CTX_LEN_V((ctx_len)))
+
+struct uld_ctx *assign_chcr_device(void);
+
+static inline void *chcr_copy_to_txd(const void *src, const struct sge_txq *q,
+ void *pos, int length)
+{
+ int left = (void *)q->stat - pos;
+ u64 *p;
+
+ if (likely(length <= left)) {
+ memcpy(pos, src, length);
+ pos += length;
+ } else {
+ memcpy(pos, src, left);
+ memcpy(q->desc, src + left, length - left);
+ pos = (void *)q->desc + (length - left);
+ }
+ /* 0-pad to multiple of 16 */
+ p = PTR_ALIGN(pos, 8);
+ if ((uintptr_t)p & 8) {
+ *p = 0;
+ return p + 1;
+ }
+ return p;
+}
+
+static inline unsigned int chcr_txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static inline void chcr_txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+static inline void chcr_eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline unsigned int chcr_sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
+static inline unsigned int chcr_flits_to_desc(unsigned int n)
+{
+ WARN_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
+#endif /* __CHCR_COMMON_H__ */
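chcr_copy_to_txd() above is a circular-queue copy: fill up to q->stat, wrap to q->desc for the remainder, then pad the write position toward a 16-byte boundary. The sketch below reproduces the wrap logic in user space (the trailing pad step is left out); names are illustrative:

#include <stdio.h>
#include <string.h>

#define QSIZE 32

/* Copy 'len' bytes into a circular buffer, splitting the copy when it
 * hits the end, and return the next write position. */
static char *txd_copy(char *desc, char *end, char *pos,
		      const void *src, size_t len)
{
	size_t left = (size_t)(end - pos);

	if (len <= left) {
		memcpy(pos, src, len);
		return pos + len;
	}
	memcpy(pos, src, left);			/* fill to the end... */
	memcpy(desc, (const char *)src + left,	/* ...wrap to the start */
	       len - left);
	return desc + (len - left);
}

int main(void)
{
	char q[QSIZE] = { 0 };
	char *pos = q + QSIZE - 5;	/* 5 bytes left before the end */

	pos = txd_copy(q, q + QSIZE, pos, "hello, world", 12);
	printf("tail: %.5s  head: %.7s  next at %zd\n",
	       q + QSIZE - 5, q, pos - q);
	return 0;
}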
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index e937605670ac..0015810214a9 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -28,13 +28,17 @@
static struct chcr_driver_data drv_data;
-typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
-static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
+typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
+static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_FW6_PLD] = cpl_fw6_pld_handler,
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
+ [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
+#endif
};
static struct cxgb4_uld_info chcr_uld_info = {
@@ -45,9 +49,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
.tx_handler = chcr_uld_tx_handler,
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
};
static void detach_work_fn(struct work_struct *work)
@@ -150,14 +154,13 @@ static int chcr_dev_move(struct uld_ctx *u_ctx)
return 0;
}
-static int cpl_fw6_pld_handler(struct chcr_dev *dev,
+static int cpl_fw6_pld_handler(struct adapter *adap,
unsigned char *input)
{
struct crypto_async_request *req;
struct cpl_fw6_pld *fw6_pld;
u32 ack_err_status = 0;
int error_status = 0;
- struct adapter *adap = padap(dev);
fw6_pld = (struct cpl_fw6_pld *)input;
req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
@@ -205,6 +208,11 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
chcr_add_xfrmops(lld);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
+ chcr_enable_ktls(padap(&u_ctx->dev));
+#endif
out:
return u_ctx;
}
@@ -214,26 +222,37 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
{
struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
struct chcr_dev *dev = &u_ctx->dev;
+ struct adapter *adap = padap(dev);
const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
- if (rpl->opcode != CPL_FW6_PLD) {
- pr_err("Unsupported opcode\n");
+ if (!work_handlers[rpl->opcode]) {
+ pr_err("Unsupported opcode %d received\n", rpl->opcode);
return 0;
}
if (!pgl)
- work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
+ work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
else
- work_handlers[rpl->opcode](dev, pgl->va);
+ work_handlers[rpl->opcode](adap, pgl->va);
return 0;
}
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
+ /* If the skb's decrypted bit is set, it is a nic tls packet;
+ * otherwise it is an ipsec packet.
+ */
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (skb->decrypted)
+ return chcr_ktls_xmit(skb, dev);
+#endif
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
return chcr_ipsec_xmit(skb, dev);
+#endif
+ return 0;
}
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
@@ -304,12 +323,20 @@ static void __exit chcr_crypto_exit(void)
list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
adap = padap(&u_ctx->dev);
memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
+ chcr_disable_ktls(adap);
+#endif
list_del(&u_ctx->entry);
kfree(u_ctx);
}
list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
adap = padap(&u_ctx->dev);
memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
+ chcr_disable_ktls(adap);
+#endif
list_del(&u_ctx->entry);
kfree(u_ctx);
}
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index ad874d548aa5..b5b371b8d343 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -222,4 +222,11 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+void chcr_enable_ktls(struct adapter *adap);
+void chcr_disable_ktls(struct adapter *adap);
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+#endif
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
new file mode 100644
index 000000000000..f0c3834eda4f
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ktls.c
@@ -0,0 +1,2021 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#include "chcr_ktls.h"
+#include "clip_tbl.h"
+
+static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
+/*
+ * chcr_ktls_save_keys: calculate and save crypto keys.
+ * @tx_info - driver specific tls info.
+ * @crypto_info - tls crypto information.
+ * @direction - TX/RX direction.
+ * return - SUCCESS/FAILURE.
+ */
+static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
+ struct tls_crypto_info *crypto_info,
+ enum tls_offload_ctx_dir direction)
+{
+ int ck_size, key_ctx_size, mac_key_size, keylen, ghash_size, ret;
+ unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE];
+ struct tls12_crypto_info_aes_gcm_128 *info_128_gcm;
+ struct ktls_key_ctx *kctx = &tx_info->key_ctx;
+ struct crypto_cipher *cipher;
+ unsigned char *key, *salt;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ info_128_gcm =
+ (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ tx_info->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+ mac_key_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+ tx_info->iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
+ tx_info->iv = be64_to_cpu(*(__be64 *)info_128_gcm->iv);
+
+ ghash_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+ key = info_128_gcm->key;
+ salt = info_128_gcm->salt;
+ tx_info->record_no = *(u64 *)info_128_gcm->rec_seq;
+
+ /* The SCMD fields used when encrypting a full TLS
+ * record. This is a one-time calculation that remains
+ * valid for the lifetime of the connection.
+ */
+ tx_info->scmd0_seqno_numivs =
+ SCMD_SEQ_NO_CTRL_V(CHCR_SCMD_SEQ_NO_CTRL_64BIT) |
+ SCMD_CIPH_AUTH_SEQ_CTRL_F |
+ SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_TLS) |
+ SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
+ SCMD_AUTH_MODE_V(CHCR_SCMD_AUTH_MODE_GHASH) |
+ SCMD_IV_SIZE_V(TLS_CIPHER_AES_GCM_128_IV_SIZE >> 1) |
+ SCMD_NUM_IVS_V(1);
+
+ /* keys will be sent inline. */
+ tx_info->scmd0_ivgen_hdrlen = SCMD_KEY_CTX_INLINE_F;
+
+ /* The SCMD fields used when encrypting a partial TLS
+ * record (no trailer and possibly a truncated payload).
+ */
+ tx_info->scmd0_short_seqno_numivs =
+ SCMD_CIPH_AUTH_SEQ_CTRL_F |
+ SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) |
+ SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_CTR) |
+ SCMD_IV_SIZE_V(AES_BLOCK_LEN >> 1);
+
+ tx_info->scmd0_short_ivgen_hdrlen =
+ tx_info->scmd0_ivgen_hdrlen | SCMD_AADIVDROP_F;
+
+ break;
+
+ default:
+ pr_err("GCM: cipher type 0x%x not supported\n",
+ crypto_info->cipher_type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ key_ctx_size = CHCR_KTLS_KEY_CTX_LEN +
+ roundup(keylen, 16) + ghash_size;
+ /* Calculate the H = CIPH(K, 0 repeated 16 times).
+ * It will go into the key context.
+ */
+ cipher = crypto_alloc_cipher("aes", 0, 0);
+ if (IS_ERR(cipher)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = crypto_cipher_setkey(cipher, key, keylen);
+ if (ret)
+ goto out1;
+
+ memset(ghash_h, 0, ghash_size);
+ crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+
+ /* fill the Key context */
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ mac_key_size,
+ key_ctx_size >> 4);
+ } else {
+ ret = -EINVAL;
+ goto out1;
+ }
+
+ memcpy(kctx->salt, salt, tx_info->salt_size);
+ memcpy(kctx->key, key, keylen);
+ memcpy(kctx->key + keylen, ghash_h, ghash_size);
+ tx_info->key_ctx_len = key_ctx_size;
+
+out1:
+ crypto_free_cipher(cipher);
+out:
+ return ret;
+}
+
+static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
+ int new_state)
+{
+ unsigned long flags;
+
+ /* This function can be called from both rx (interrupt context) and tx
+ * queue contexts.
+ */
+ spin_lock_irqsave(&tx_info->lock, flags);
+ switch (tx_info->connection_state) {
+ case KTLS_CONN_CLOSED:
+ tx_info->connection_state = new_state;
+ break;
+
+ case KTLS_CONN_ACT_OPEN_REQ:
+ /* only go forward if state is greater than current state. */
+ if (new_state <= tx_info->connection_state)
+ break;
+ /* update to the next state and also initialize TCB */
+ tx_info->connection_state = new_state;
+ /* FALLTHRU */
+ case KTLS_CONN_ACT_OPEN_RPL:
+ /* if we are stuck in this state, the tcb init might not have
+ * reached the HW; try sending it again.
+ */
+ if (!chcr_init_tcb_fields(tx_info))
+ tx_info->connection_state = KTLS_CONN_SET_TCB_REQ;
+ break;
+
+ case KTLS_CONN_SET_TCB_REQ:
+ /* only go forward if state is greater than current state. */
+ if (new_state <= tx_info->connection_state)
+ break;
+ /* update to the next state and check if l2t_state is valid */
+ tx_info->connection_state = new_state;
+ /* FALLTHRU */
+ case KTLS_CONN_SET_TCB_RPL:
+ /* Check if l2t state is valid, then move to ready state. */
+ if (cxgb4_check_l2t_valid(tx_info->l2te)) {
+ tx_info->connection_state = KTLS_CONN_TX_READY;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ctx);
+ }
+ break;
+
+ case KTLS_CONN_TX_READY:
+ /* nothing to be done here */
+ break;
+
+ default:
+ pr_err("unknown KTLS connection state\n");
+ break;
+ }
+ spin_unlock_irqrestore(&tx_info->lock, flags);
+
+ return tx_info->connection_state;
+}
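chcr_ktls_update_connection_state() is a forward-only state machine in which the deliberate /* FALLTHRU */ lets one event complete two steps: moving to ACT_OPEN_RPL immediately attempts the TCB init that the next state would otherwise have to retry. A compressed user-space sketch of that shape, with the hardware interaction stubbed out and all names illustrative:

#include <stdio.h>

enum conn_state { CONN_CLOSED, CONN_OPEN_REQ, CONN_OPEN_RPL,
		  CONN_TCB_REQ, CONN_READY };

static int send_tcb_init(void) { return 0; }	/* pretend it succeeds */

/* Events only move the state forward; the fallthrough chains the
 * OPEN_REQ->OPEN_RPL transition straight into the TCB init attempt. */
static enum conn_state advance(enum conn_state cur, enum conn_state ev)
{
	switch (cur) {
	case CONN_CLOSED:
		return ev;
	case CONN_OPEN_REQ:
		if (ev <= cur)
			return cur;
		cur = ev;
		/* fallthrough */
	case CONN_OPEN_RPL:
		if (send_tcb_init() == 0)
			cur = CONN_TCB_REQ;
		return cur;
	default:
		return cur;
	}
}

int main(void)
{
	enum conn_state s = CONN_CLOSED;

	s = advance(s, CONN_OPEN_REQ);
	s = advance(s, CONN_OPEN_RPL);	/* lands in CONN_TCB_REQ */
	printf("state = %d\n", s);
	return 0;
}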
+/*
+ * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
+ * @sk - tcp socket.
+ * @tx_info - driver specific tls info.
+ * @atid - connection active tid.
+ * return - send success/failure.
+ */
+static int chcr_ktls_act_open_req(struct sock *sk,
+ struct chcr_ktls_info *tx_info,
+ int atid)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct cpl_t6_act_open_req *cpl6;
+ struct cpl_act_open_req *cpl;
+ struct sk_buff *skb;
+ unsigned int len;
+ int qid_atid;
+ u64 options;
+
+ len = sizeof(*cpl6);
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ /* mark it a control pkt */
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+
+ cpl6 = __skb_put_zero(skb, len);
+ cpl = (struct cpl_act_open_req *)cpl6;
+ INIT_TP_WR(cpl6, 0);
+ qid_atid = TID_QID_V(tx_info->rx_qid) |
+ TID_TID_V(atid);
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid));
+ cpl->local_port = inet->inet_sport;
+ cpl->peer_port = inet->inet_dport;
+ cpl->local_ip = inet->inet_rcv_saddr;
+ cpl->peer_ip = inet->inet_daddr;
+
+ /* fill first 64 bit option field. */
+ options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
+ SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
+ cpl->opt0 = cpu_to_be64(options);
+
+ /* next 64 bit option field. */
+ options =
+ TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
+ cpl->opt2 = htonl(options);
+
+ return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
+}
+
+/*
+ * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
+ * @sk - tcp socket.
+ * @tx_info - driver specific tls info.
+ * @atid - connection active tid.
+ * return - send success/failure.
+ */
+static int chcr_ktls_act_open_req6(struct sock *sk,
+ struct chcr_ktls_info *tx_info,
+ int atid)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct cpl_t6_act_open_req6 *cpl6;
+ struct cpl_act_open_req6 *cpl;
+ struct sk_buff *skb;
+ unsigned int len;
+ int qid_atid;
+ u64 options;
+
+ len = sizeof(*cpl6);
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ /* mark it a control pkt */
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+
+ cpl6 = __skb_put_zero(skb, len);
+ cpl = (struct cpl_act_open_req6 *)cpl6;
+ INIT_TP_WR(cpl6, 0);
+ qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid);
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid));
+ cpl->local_port = inet->inet_sport;
+ cpl->peer_port = inet->inet_dport;
+ cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0];
+ cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8];
+ cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0];
+ cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8];
+
+ /* first 64 bit option field. */
+ options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
+ SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
+ cpl->opt0 = cpu_to_be64(options);
+ /* next 64 bit option field. */
+ options =
+ TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
+ cpl->opt2 = htonl(options);
+
+ return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
+}
+
+/*
+ * chcr_setup_connection: create a TCB entry so that TP will form tcp packets.
+ * @sk - tcp socket.
+ * @tx_info - driver specific tls info.
+ * return: NET_TX_OK/NET_XMIT_DROP
+ */
+static int chcr_setup_connection(struct sock *sk,
+ struct chcr_ktls_info *tx_info)
+{
+ struct tid_info *t = &tx_info->adap->tids;
+ int atid, ret = 0;
+
+ atid = cxgb4_alloc_atid(t, tx_info);
+ if (atid == -1)
+ return -EINVAL;
+
+ tx_info->atid = atid;
+ tx_info->ip_family = sk->sk_family;
+
+ if (sk->sk_family == AF_INET ||
+ (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+ tx_info->ip_family = AF_INET;
+ ret = chcr_ktls_act_open_req(sk, tx_info, atid);
+ } else {
+ tx_info->ip_family = AF_INET6;
+ ret =
+ cxgb4_clip_get(tx_info->netdev,
+ (const u32 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8,
+ 1);
+ if (ret)
+ goto out;
+ ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
+ }
+
+ /* NET_XMIT_CN means the msg is sent but delayed; treat it as
+ * success. On any other error, free the atid and return the failure.
+ */
+ if (ret) {
+ if (ret == NET_XMIT_CN)
+ ret = 0;
+ else
+ cxgb4_free_atid(t, atid);
+ goto out;
+ }
+
+ /* update the connection state */
+ chcr_ktls_update_connection_state(tx_info, KTLS_CONN_ACT_OPEN_REQ);
+out:
+ return ret;
+}
+
+/*
+ * chcr_set_tcb_field: update tcb fields.
+ * @tx_info - driver specific tls info.
+ * @word - TCB word.
+ * @mask - TCB word related mask.
+ * @val - TCB word related value.
+ * @no_reply - set 1 if not looking for TP response.
+ */
+static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
+ u64 mask, u64 val, int no_reply)
+{
+ struct cpl_set_tcb_field *req;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
+ INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, tx_info->tid);
+ req->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
+ NO_REPLY_V(no_reply));
+ req->word_cookie = htons(TCB_WORD_V(word));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+ return cxgb4_ofld_send(tx_info->netdev, skb);
+}
+
+/*
+ * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
+ * @tx_info - driver specific tls info.
+ * return: NET_TX_OK/NET_XMIT_DROP.
+ */
+static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
+{
+ return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
+ TCB_T_STATE_V(TCB_T_STATE_M),
+ CHCR_TCB_STATE_CLOSED, 1);
+}
+
+/*
+ * chcr_ktls_dev_del: callback for tls_dev_del.
+ * Remove the tid and l2t entry and close the connection.
+ * This is done on a per-connection basis.
+ * @netdev - net device.
+ * @tls_ctx - tls context.
+ * @direction - TX/RX crypto direction
+ */
+static void chcr_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx =
+ chcr_get_ktls_tx_context(tls_ctx);
+ struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+ struct sock *sk;
+
+ if (!tx_info)
+ return;
+ sk = tx_info->sk;
+
+ spin_lock(&tx_info->lock);
+ tx_info->connection_state = KTLS_CONN_CLOSED;
+ spin_unlock(&tx_info->lock);
+
+ /* clear l2t entry */
+ if (tx_info->l2te)
+ cxgb4_l2t_release(tx_info->l2te);
+
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(netdev,
+ (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
+ 1);
+
+ /* clear tid */
+ if (tx_info->tid != -1) {
+ /* clear tcb state and then release tid */
+ chcr_ktls_mark_tcb_close(tx_info);
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tx_info->tid, tx_info->ip_family);
+ }
+
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
+ kvfree(tx_info);
+ tx_ctx->chcr_info = NULL;
+}
+
+/*
+ * chcr_ktls_dev_add: callback for tls_dev_add.
+ * Create a tcb entry for TP, add an l2t entry for the connection, and
+ * generate and save the crypto keys locally.
+ * @netdev - net device.
+ * @tls_ctx - tls context.
+ * @direction - TX/RX crypto direction
+ * return: SUCCESS/FAILURE.
+ */
+static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_info *tx_info;
+ struct dst_entry *dst;
+ struct adapter *adap;
+ struct port_info *pi;
+ struct neighbour *n;
+ u8 daaddr[16];
+ int ret = -1;
+
+ tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+
+ pi = netdev_priv(netdev);
+ adap = pi->adapter;
+ if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
+ pr_err("not expecting for RX direction\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (tx_ctx->chcr_info) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
+ if (!tx_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_init(&tx_info->lock);
+
+ /* clear connection state */
+ spin_lock(&tx_info->lock);
+ tx_info->connection_state = KTLS_CONN_CLOSED;
+ spin_unlock(&tx_info->lock);
+
+ tx_info->sk = sk;
+ /* initialize tid and atid to -1; 0 is also a valid id. */
+ tx_info->tid = -1;
+ tx_info->atid = -1;
+
+ tx_info->adap = adap;
+ tx_info->netdev = netdev;
+ tx_info->first_qset = pi->first_qset;
+ tx_info->tx_chan = pi->tx_chan;
+ tx_info->smt_idx = pi->smt_idx;
+ tx_info->port_id = pi->port_id;
+
+ tx_info->rx_qid = chcr_get_first_rx_qid(adap);
+ if (unlikely(tx_info->rx_qid < 0))
+ goto out2;
+
+ tx_info->prev_seq = start_offload_tcp_sn;
+ tx_info->tcp_start_seq_number = start_offload_tcp_sn;
+
+ /* save crypto keys */
+ ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
+ if (ret < 0)
+ goto out2;
+
+ /* get peer ip */
+ if (sk->sk_family == AF_INET ||
+ (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+ memcpy(daaddr, &sk->sk_daddr, 4);
+ } else {
+ memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+ }
+
+ /* get the l2t index */
+ dst = sk_dst_get(sk);
+ if (!dst) {
+ pr_err("DST entry not found\n");
+ goto out2;
+ }
+ n = dst_neigh_lookup(dst, daaddr);
+ if (!n || !n->dev) {
+ pr_err("neighbour not found\n");
+ dst_release(dst);
+ goto out2;
+ }
+ tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);
+
+ neigh_release(n);
+ dst_release(dst);
+
+ if (!tx_info->l2te) {
+ pr_err("l2t entry not found\n");
+ goto out2;
+ }
+
+ tx_ctx->chcr_info = tx_info;
+
+ /* create a filter and call cxgb4_l2t_send to send the packet out, which
+ * will take care of updating l2t entry in hw if not already done.
+ */
+ ret = chcr_setup_connection(sk, tx_info);
+ if (ret)
+ goto out2;
+
+ atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
+ return 0;
+out2:
+ kvfree(tx_info);
+out:
+ atomic64_inc(&adap->chcr_stats.ktls_tx_connection_fail);
+ return ret;
+}
+
+static const struct tlsdev_ops chcr_ktls_ops = {
+ .tls_dev_add = chcr_ktls_dev_add,
+ .tls_dev_del = chcr_ktls_dev_del,
+};
+
+/*
+ * chcr_enable_ktls: add NETIF_F_HW_TLS_TX flag in all the ports.
+ */
+void chcr_enable_ktls(struct adapter *adap)
+{
+ struct net_device *netdev;
+ int i;
+
+ for_each_port(adap, i) {
+ netdev = adap->port[i];
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->tlsdev_ops = &chcr_ktls_ops;
+ }
+}
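
For context, a hedged sketch of how a connection typically reaches
chcr_ktls_dev_add() once the flag and tlsdev_ops above are in place: user
space attaches the kTLS ULP and pushes the TX crypto state. This is the
standard kernel TLS socket API, not part of this patch; the SOL_TLS
fallback value and the helper name are assumptions of the sketch.

    #include <sys/socket.h>
    #include <netinet/tcp.h>
    #include <linux/tls.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282
    #endif

    /* fd: a connected TCP socket routed over one of this adapter's ports */
    static int setup_ktls_tx(int fd, struct tls12_crypto_info_aes_gcm_128 *ci)
    {
            /* attach the TLS upper layer protocol */
            if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
                    return -1;
            /* push TX keys; with NETIF_F_HW_TLS_TX set on the route's
             * netdev, the stack calls that netdev's tls_dev_add(), i.e.
             * chcr_ktls_dev_add() in this driver.
             */
            return setsockopt(fd, SOL_TLS, TLS_TX, ci, sizeof(*ci));
    }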
+
+/*
+ * chcr_disable_ktls: remove NETIF_F_HW_TLS_TX flag from all the ports.
+ */
+void chcr_disable_ktls(struct adapter *adap)
+{
+ struct net_device *netdev;
+ int i;
+
+ for_each_port(adap, i) {
+ netdev = adap->port[i];
+ netdev->features &= ~NETIF_F_HW_TLS_TX;
+ netdev->hw_features &= ~NETIF_F_HW_TLS_TX;
+ netdev->tlsdev_ops = NULL;
+ }
+}
+
+/*
+ * chcr_init_tcb_fields: Initialize tcb fields to handle TCP seq number
+ * handling.
+ * @tx_info - driver specific tls info.
+ * return: NET_TX_OK/NET_XMIT_DROP
+ */
+static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info)
+{
+ int ret = 0;
+
+ /* set tcb in offload and bypass */
+ ret =
+ chcr_set_tcb_field(tx_info, TCB_T_FLAGS_W,
+ TCB_T_FLAGS_V(TF_CORE_BYPASS_F | TF_NON_OFFLOAD_F),
+ TCB_T_FLAGS_V(TF_CORE_BYPASS_F), 1);
+ if (ret)
+ return ret;
+ /* reset snd_una and snd_next fields in tcb */
+ ret = chcr_set_tcb_field(tx_info, TCB_SND_UNA_RAW_W,
+ TCB_SND_NXT_RAW_V(TCB_SND_NXT_RAW_M) |
+ TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
+ 0, 1);
+ if (ret)
+ return ret;
+
+ /* reset send max */
+ ret = chcr_set_tcb_field(tx_info, TCB_SND_MAX_RAW_W,
+ TCB_SND_MAX_RAW_V(TCB_SND_MAX_RAW_M),
+ 0, 1);
+ if (ret)
+ return ret;
+
+ /* update the l2t index and request a tp reply to confirm the tcb is
+ * initialised to handle tx traffic.
+ */
+ ret = chcr_set_tcb_field(tx_info, TCB_L2T_IX_W,
+ TCB_L2T_IX_V(TCB_L2T_IX_M),
+ TCB_L2T_IX_V(tx_info->l2te->idx), 0);
+ return ret;
+}
+
+/*
+ * chcr_ktls_cpl_act_open_rpl: connection reply received from TP.
+ */
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input)
+{
+ const struct cpl_act_open_rpl *p = (void *)input;
+ struct chcr_ktls_info *tx_info = NULL;
+ unsigned int atid, tid, status;
+ struct tid_info *t;
+
+ tid = GET_TID(p);
+ status = AOPEN_STATUS_G(ntohl(p->atid_status));
+ atid = TID_TID_G(AOPEN_ATID_G(ntohl(p->atid_status)));
+
+ t = &adap->tids;
+ tx_info = lookup_atid(t, atid);
+
+ if (!tx_info || tx_info->atid != atid) {
+ pr_err("tx_info or atid is not correct\n");
+ return -1;
+ }
+
+ if (!status) {
+ tx_info->tid = tid;
+ cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
+
+ cxgb4_free_atid(t, atid);
+ tx_info->atid = -1;
+ /* update the connection state */
+ chcr_ktls_update_connection_state(tx_info,
+ KTLS_CONN_ACT_OPEN_RPL);
+ }
+ return 0;
+}
+
+/*
+ * chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP.
+ */
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
+{
+ const struct cpl_set_tcb_rpl *p = (void *)input;
+ struct chcr_ktls_info *tx_info = NULL;
+ struct tid_info *t;
+ u32 tid, status;
+
+ tid = GET_TID(p);
+ status = p->status;
+
+ t = &adap->tids;
+ tx_info = lookup_tid(t, tid);
+ if (!tx_info || tx_info->tid != tid) {
+ pr_err("tx_info or atid is not correct\n");
+ return -1;
+ }
+ /* update the connection state */
+ chcr_ktls_update_connection_state(tx_info, KTLS_CONN_SET_TCB_RPL);
+ return 0;
+}
+
+/*
+ * chcr_write_cpl_set_tcb_ulp: update tcb values.
+ * TP uses the TCB to create tcp headers, so all the related values
+ * must be kept up to date.
+ * @tx_info - driver specific tls info.
+ * @q - tx queue on which packet is going out.
+ * @tid - TCB identifier.
+ * @pos - current position from where we should start writing.
+ * @word - TCB word.
+ * @mask - TCB word related mask.
+ * @val - TCB word related value.
+ * @reply - set 1 if looking for TP response.
+ * return - next position to write.
+ */
+static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q, u32 tid,
+ void *pos, u16 word, u64 mask,
+ u64 val, u32 reply)
+{
+ struct cpl_set_tcb_field_core *cpl;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *txpkt;
+ void *save_pos = NULL;
+ u8 buf[48] = {0};
+ int left;
+
+ left = (void *)q->q.stat - pos;
+ if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
+ if (!left) {
+ pos = q->q.desc;
+ } else {
+ save_pos = pos;
+ pos = buf;
+ }
+ }
+ /* ULP_TXPKT */
+ txpkt = pos;
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+ txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
+
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(txpkt + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ idata->len = htonl(sizeof(*cpl));
+ pos = idata + 1;
+
+ cpl = pos;
+ /* CPL_SET_TCB_FIELD */
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ cpl->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
+ NO_REPLY_V(!reply));
+ cpl->word_cookie = htons(TCB_WORD_V(word));
+ cpl->mask = cpu_to_be64(mask);
+ cpl->val = cpu_to_be64(val);
+
+ /* ULPTX_NOOP */
+ idata = (struct ulptx_idata *)(cpl + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
+ idata->len = htonl(0);
+
+ if (save_pos) {
+ pos = chcr_copy_to_txd(buf, &q->q, save_pos,
+ CHCR_SET_TCB_FIELD_LEN);
+ } else {
+ /* check again if we are at the end of the queue */
+ if (left == CHCR_SET_TCB_FIELD_LEN)
+ pos = q->q.desc;
+ else
+ pos = idata + 1;
+ }
+
+ return pos;
+}
+
+/*
+ * chcr_ktls_xmit_tcb_cpls: update tcb entry so that TP will create the header
+ * with updated values like tcp seq, ack, window etc.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * @tcp_seq - tcp sequence number.
+ * @tcp_ack - tcp acknowledgement number.
+ * @tcp_win - tcp receive window.
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q, u64 tcp_seq,
+ u64 tcp_ack, u64 tcp_win)
+{
+ bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
+ u32 len, cpl = 0, ndesc, wr_len;
+ struct fw_ulptx_wr *wr;
+ int credits;
+ void *pos;
+
+ wr_len = sizeof(*wr);
+ /* there can be max 4 cpls, check if we have enough credits */
+ len = wr_len + 4 * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+ ndesc = DIV_ROUND_UP(len, 64);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ /* make space for WR, we'll fill it later when we know all the cpls
+ * being sent out and have complete length.
+ */
+ wr = pos;
+ pos += wr_len;
+ /* update tx_max if it's a re-transmit or the first wr */
+ if (first_wr || tcp_seq != tx_info->prev_seq) {
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_TX_MAX_W,
+ TCB_TX_MAX_V(TCB_TX_MAX_M),
+ TCB_TX_MAX_V(tcp_seq), 0);
+ cpl++;
+ }
+ /* reset snd una if it's a re-transmit pkt */
+ if (tcp_seq != tx_info->prev_seq) {
+ /* reset snd_una */
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_SND_UNA_RAW_W,
+ TCB_SND_UNA_RAW_V
+ (TCB_SND_UNA_RAW_M),
+ TCB_SND_UNA_RAW_V(0), 0);
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ooo);
+ cpl++;
+ }
+ /* update ack */
+ if (first_wr || tx_info->prev_ack != tcp_ack) {
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_RCV_NXT_W,
+ TCB_RCV_NXT_V(TCB_RCV_NXT_M),
+ TCB_RCV_NXT_V(tcp_ack), 0);
+ tx_info->prev_ack = tcp_ack;
+ cpl++;
+ }
+ /* update receive window */
+ if (first_wr || tx_info->prev_win != tcp_win) {
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_RCV_WND_W,
+ TCB_RCV_WND_V(TCB_RCV_WND_M),
+ TCB_RCV_WND_V(tcp_win), 0);
+ tx_info->prev_win = tcp_win;
+ cpl++;
+ }
+
+ if (cpl) {
+ /* get the actual length */
+ len = wr_len + cpl * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+ /* ULPTX wr */
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->cookie = 0;
+ /* fill len in wr field */
+ wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+
+ ndesc = DIV_ROUND_UP(len, 64);
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ }
+ return 0;
+}
+
+/*
+ * chcr_ktls_skb_copy
+ * @skb - old skb from which the frags will be copied.
+ * @nskb - new skb to which the frags will be added.
+ */
+static void chcr_ktls_skb_copy(struct sk_buff *skb, struct sk_buff *nskb)
+{
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
+ __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
+ }
+
+ skb_shinfo(nskb)->nr_frags = skb_shinfo(skb)->nr_frags;
+ nskb->len += skb->data_len;
+ nskb->data_len = skb->data_len;
+ nskb->truesize += skb->data_len;
+}
+
+/*
+ * chcr_ktls_get_tx_flits
+ * returns the number of flits to be sent out; it includes the key context
+ * length, WR size and skb fragments.
+ */
+static unsigned int
+chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
+{
+ return chcr_sgl_len(skb_shinfo(skb)->nr_frags) +
+ DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
+}
+
+/*
+ * chcr_ktls_check_tcp_options: check whether the packet carries any TCP
+ * option other than NOP/EOL.
+ * @tcp - tcp header.
+ * return: 1 / 0
+ */
+static int
+chcr_ktls_check_tcp_options(struct tcphdr *tcp)
+{
+ int cnt, opt, optlen;
+ u_char *cp;
+
+ cp = (u_char *)(tcp + 1);
+ cnt = (tcp->doff << 2) - sizeof(struct tcphdr);
+ for (; cnt > 0; cnt -= optlen, cp += optlen) {
+ opt = cp[0];
+ if (opt == TCPOPT_EOL)
+ break;
+ if (opt == TCPOPT_NOP) {
+ optlen = 1;
+ } else {
+ if (cnt < 2)
+ break;
+ optlen = cp[1];
+ if (optlen < 2 || optlen > cnt)
+ break;
+ }
+ switch (opt) {
+ case TCPOPT_NOP:
+ break;
+ default:
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * chcr_ktls_write_tcp_options: TP can't send out all the options, so we need
+ * to send them out separately.
+ * @tx_info - driver specific tls info.
+ * @skb - skb containing the tcp options to be sent out.
+ * @q - TX queue.
+ * @tx_chan - channel number.
+ * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
+ */
+static int
+chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
+ struct sge_eth_txq *q, uint32_t tx_chan)
+{
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ u32 ctrl, iplen, maclen;
+ struct ipv6hdr *ip6;
+ unsigned int ndesc;
+ struct tcphdr *tcp;
+ int len16, pktlen;
+ struct iphdr *ip;
+ int credits;
+ u8 buf[150];
+ void *pos;
+
+ iplen = skb_network_header_len(skb);
+ maclen = skb_mac_header_len(skb);
+
+ /* packet length = eth hdr len + ip hdr len + tcp hdr len
+ * (including options).
+ */
+ pktlen = skb->len - skb->data_len;
+
+ ctrl = sizeof(*cpl) + pktlen;
+ len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
+ /* check how many descriptors needed */
+ ndesc = DIV_ROUND_UP(len16, 4);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ wr = pos;
+
+ /* Firmware work request header */
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(ctrl));
+
+ wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) | TXPKT_INTF_V(tx_chan) |
+ TXPKT_PF_V(tx_info->adap->pf));
+ cpl->pack = 0;
+ cpl->len = htons(pktlen);
+ /* checksum offload */
+ cpl->ctrl1 = 0;
+
+ pos = cpl + 1;
+
+ memcpy(buf, skb->data, pktlen);
+ if (tx_info->ip_family == AF_INET) {
+ /* we need to correct ip header len */
+ ip = (struct iphdr *)(buf + maclen);
+ ip->tot_len = htons(pktlen - maclen);
+ } else {
+ ip6 = (struct ipv6hdr *)(buf + maclen);
+ ip6->payload_len = htons(pktlen - maclen);
+ }
+ /* now take care of the tcp header: if fin is not set then clear the
+ * push bit as well, and if fin is set, it will be sent last so we
+ * need to update the tcp sequence number to match the last packet.
+ */
+ tcp = (struct tcphdr *)(buf + maclen + iplen);
+
+ if (!tcp->fin)
+ tcp->psh = 0;
+ else
+ tcp->seq = htonl(tx_info->prev_seq);
+
+ chcr_copy_to_txd(buf, &q->q, pos, pktlen);
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ return 0;
+}
+
+/* chcr_ktls_skb_shift - Shifts the requested length of paged data from one
+ * skb to another.
+ * @tgt - buffer into which tail data gets added
+ * @skb - buffer from which the paged data comes
+ * @shiftlen - shift up to this many bytes
+ */
+static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
+ int shiftlen)
+{
+ skb_frag_t *fragfrom, *fragto;
+ int from, to, todo;
+
+ WARN_ON(shiftlen > skb->data_len);
+
+ todo = shiftlen;
+ from = 0;
+ to = 0;
+ fragfrom = &skb_shinfo(skb)->frags[from];
+
+ while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
+ fragfrom = &skb_shinfo(skb)->frags[from];
+ fragto = &skb_shinfo(tgt)->frags[to];
+
+ if (todo >= skb_frag_size(fragfrom)) {
+ *fragto = *fragfrom;
+ todo -= skb_frag_size(fragfrom);
+ from++;
+ to++;
+
+ } else {
+ __skb_frag_ref(fragfrom);
+ skb_frag_page_copy(fragto, fragfrom);
+ skb_frag_off_copy(fragto, fragfrom);
+ skb_frag_size_set(fragto, todo);
+
+ skb_frag_off_add(fragfrom, todo);
+ skb_frag_size_sub(fragfrom, todo);
+ todo = 0;
+
+ to++;
+ break;
+ }
+ }
+
+ /* Ready to "commit" this state change to tgt */
+ skb_shinfo(tgt)->nr_frags = to;
+
+ /* Reposition in the original skb */
+ to = 0;
+ while (from < skb_shinfo(skb)->nr_frags)
+ skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
+
+ skb_shinfo(skb)->nr_frags = to;
+
+ WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
+
+ skb->len -= shiftlen;
+ skb->data_len -= shiftlen;
+ skb->truesize -= shiftlen;
+ tgt->len += shiftlen;
+ tgt->data_len += shiftlen;
+ tgt->truesize += shiftlen;
+
+ return shiftlen;
+}
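
A hedged usage sketch of the helper above, mirroring how the xmit path
later in this file peels one record's worth of paged data off the working
skb (names follow the driver; illustrative only, error handling trimmed):

    /* split the first tls_end_offset bytes into a fresh skb */
    struct sk_buff *nskb = alloc_skb(0, GFP_KERNEL);

    if (nskb) {
            chcr_ktls_skb_shift(nskb, local_skb, tls_end_offset);
            /* nskb now owns the first record's frags (with an extra page
             * ref where a frag was split); local_skb keeps the rest.
             */
    }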
+
+/*
+ * chcr_ktls_xmit_wr_complete: This sends out the complete record. If the skb
+ * received has only a partial end part of the record, the complete record is
+ * sent out so that the crypto block can generate the TAG/HASH.
+ * @skb - segment which has the complete or partial end part.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * @tcp_seq - tcp sequence number.
+ * @tcp_push - tcp push bit.
+ * @mss - segment size.
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
+ struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q, u32 tcp_seq,
+ bool tcp_push, u32 mss)
+{
+ u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
+ struct adapter *adap = tx_info->adap;
+ int credits, left, last_desc;
+ struct tx_sw_desc *sgl_sdesc;
+ struct cpl_tx_data *tx_data;
+ struct cpl_tx_sec_pdu *cpl;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *ulptx;
+ struct fw_ulptx_wr *wr;
+ void *pos;
+ u64 *end;
+
+ /* get the number of flits required */
+ flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len);
+ /* number of descriptors */
+ ndesc = chcr_flits_to_desc(flits);
+ /* check if enough credits available */
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ /* Credits are below the threshold values, stop the queue after
+ * injecting the Work Request for this packet.
+ */
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ /* FW_ULPTX_WR */
+ wr = pos;
+ /* WR will need len16 */
+ len16 = DIV_ROUND_UP(flits, 2);
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->cookie = 0;
+ pos += sizeof(*wr);
+ /* ULP_TXPKT */
+ ulptx = pos;
+ ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) |
+ ULP_TXPKT_RO_F);
+ ulptx->len = htonl(len16 - 1);
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(ulptx + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
+ /* idata length will include cpl_tx_sec_pdu + key context size +
+ * cpl_tx_data header.
+ */
+ idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
+ sizeof(*tx_data));
+ /* SEC CPL */
+ cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
+ cpl->op_ivinsrtofst =
+ htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
+ cpl->pldlen = htonl(skb->data_len);
+
+ /* encryption should start after tls header size + iv size */
+ cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
+
+ cpl->aadstart_cipherstop_hi =
+ htonl(CPL_TX_SEC_PDU_AADSTART_V(1) |
+ CPL_TX_SEC_PDU_AADSTOP_V(TLS_HEADER_SIZE) |
+ CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
+
+ /* authentication will also start after tls header + iv size */
+ cpl->cipherstop_lo_authinsert =
+ htonl(CPL_TX_SEC_PDU_AUTHSTART_V(cipher_start) |
+ CPL_TX_SEC_PDU_AUTHSTOP_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE) |
+ CPL_TX_SEC_PDU_AUTHINSERT_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE));
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ cpl->seqno_numivs = htonl(tx_info->scmd0_seqno_numivs);
+ cpl->ivgen_hdrlen = htonl(tx_info->scmd0_ivgen_hdrlen);
+ cpl->scmd1 = cpu_to_be64(tx_info->record_no);
+
+ pos = cpl + 1;
+ /* check if space left to fill the keys */
+ left = (void *)q->q.stat - pos;
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+
+ pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
+ tx_info->key_ctx_len);
+ left = (void *)q->q.stat - pos;
+
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+ /* CPL_TX_DATA */
+ tx_data = (void *)pos;
+ OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(skb->data_len));
+
+ tx_data->rsvd = htonl(tcp_seq);
+
+ tx_data->flags = htonl(TX_BYPASS_F);
+ if (tcp_push)
+ tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
+
+ /* check left again, it might go beyond queue limit */
+ pos = tx_data + 1;
+ left = (void *)q->q.stat - pos;
+
+ /* check the position again */
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+
+ /* send the complete packet except the header */
+ cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
+ sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ atomic64_inc(&adap->chcr_stats.ktls_tx_send_records);
+
+ return 0;
+}
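
To summarise the work request just assembled, a sketch of its layout
(field names as used above; sizes elided):

    /*
     *   fw_ulptx_wr      - firmware work request header (len16, flags)
     *   ulp_txpkt        - ULP TX packet command (channel, queue)
     *   ulptx_idata      - immediate data sub-command
     *   cpl_tx_sec_pdu   - crypto offsets (AAD, cipher, auth) + scmd
     *   key context      - key_ctx_len bytes of key material
     *   cpl_tx_data      - TCP payload parameters (mss, seq, flags)
     *   SGL              - gather list pointing at the skb frags
     */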
+
+/*
+ * chcr_ktls_xmit_wr_short: This is to send out partial records. If it's
+ * the middle part of a record, fetch the prior data to make it 16 byte
+ * aligned before sending it out.
+ *
+ * @skb - skb containing a partial record.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * @tcp_seq - tcp sequence number.
+ * @tcp_push - tcp push bit.
+ * @mss - segment size.
+ * @tls_rec_offset - offset from start of the tls record.
+ * @prior_data - data before the current segment, required to make this record
+ * 16 byte aligned.
+ * @prior_data_len - prior_data length (less than 16)
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
+ struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q,
+ u32 tcp_seq, bool tcp_push, u32 mss,
+ u32 tls_rec_offset, u8 *prior_data,
+ u32 prior_data_len)
+{
+ struct adapter *adap = tx_info->adap;
+ u32 len16, wr_mid = 0, cipher_start;
+ unsigned int flits = 0, ndesc;
+ int credits, left, last_desc;
+ struct tx_sw_desc *sgl_sdesc;
+ struct cpl_tx_data *tx_data;
+ struct cpl_tx_sec_pdu *cpl;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *ulptx;
+ struct fw_ulptx_wr *wr;
+ __be64 iv_record;
+ void *pos;
+ u64 *end;
+
+ /* get the number of flits required, it's a partial record so 2 flits
+ * (AES_BLOCK_SIZE) will be added.
+ */
+ flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len) + 2;
+ /* get the correct 8 byte IV of this record */
+ iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
+ /* If it's a middle record and not 16 byte aligned as AES CTR needs,
+ * we have to make it 16 byte aligned, so at least 2 extra flits of
+ * immediate data will be added.
+ */
+ if (prior_data_len)
+ flits += 2;
+ /* number of descriptors */
+ ndesc = chcr_flits_to_desc(flits);
+ /* check if enough credits available */
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ /* FW_ULPTX_WR */
+ wr = pos;
+ /* WR will need len16 */
+ len16 = DIV_ROUND_UP(flits, 2);
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->cookie = 0;
+ pos += sizeof(*wr);
+ /* ULP_TXPKT */
+ ulptx = pos;
+ ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) |
+ ULP_TXPKT_RO_F);
+ ulptx->len = htonl(len16 - 1);
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(ulptx + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
+ /* idata length will include cpl_tx_sec_pdu + key context size +
+ * cpl_tx_data header.
+ */
+ idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
+ sizeof(*tx_data) + AES_BLOCK_LEN + prior_data_len);
+ /* SEC CPL */
+ cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
+ /* cipher start has tls header + iv size extra if this chunk carries
+ * the start of the tls record; otherwise only the 16 byte AES-CTR
+ * block precedes it.
+ */
+ cipher_start =
+ AES_BLOCK_LEN + 1 +
+ (!tls_rec_offset ? TLS_HEADER_SIZE + tx_info->iv_size : 0);
+
+ cpl->op_ivinsrtofst =
+ htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
+ cpl->pldlen = htonl(skb->data_len + AES_BLOCK_LEN + prior_data_len);
+ cpl->aadstart_cipherstop_hi =
+ htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
+ cpl->cipherstop_lo_authinsert = 0;
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ cpl->seqno_numivs = htonl(tx_info->scmd0_short_seqno_numivs);
+ cpl->ivgen_hdrlen = htonl(tx_info->scmd0_short_ivgen_hdrlen);
+ cpl->scmd1 = 0;
+
+ pos = cpl + 1;
+ /* check if space left to fill the keys */
+ left = (void *)q->q.stat - pos;
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+
+ pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
+ tx_info->key_ctx_len);
+ left = (void *)q->q.stat - pos;
+
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+ /* CPL_TX_DATA */
+ tx_data = (void *)pos;
+ OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) |
+ TX_LENGTH_V(skb->data_len + prior_data_len));
+ tx_data->rsvd = htonl(tcp_seq);
+ tx_data->flags = htonl(TX_BYPASS_F);
+ if (tcp_push)
+ tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
+
+ /* check left again, it might go beyond queue limit */
+ pos = tx_data + 1;
+ left = (void *)q->q.stat - pos;
+
+ /* check the position again */
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+ /* copy the 16 byte AES-CTR block, which consists of 4 bytes of salt,
+ * 8 bytes of the actual IV and a 4 byte block counter.
+ */
+ memcpy(pos, tx_info->key_ctx.salt, tx_info->salt_size);
+ memcpy(pos + tx_info->salt_size, &iv_record, tx_info->iv_size);
+ *(__be32 *)(pos + tx_info->salt_size + tx_info->iv_size) =
+ htonl(2 + (tls_rec_offset ? ((tls_rec_offset -
+ (TLS_HEADER_SIZE + tx_info->iv_size)) / AES_BLOCK_LEN) : 0));
+
+ pos += 16;
+ /* prior_data_len will always be less than 16 bytes; copy prior_data
+ * just after the AES CTR block and zero-fill the rest of the 16
+ * bytes.
+ */
+ if (prior_data_len)
+ pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
+ /* send the complete packet except the header */
+ cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
+ sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+
+ return 0;
+}
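
The 16 bytes written just above can be pictured as a struct; a hedged
sketch follows (this type does not exist in the driver). The counter
seeds at 2 plus the number of whole AES blocks already covered, which
matches AES-GCM keystream numbering, where counter value 1 is consumed
by tag generation:

    /* hypothetical type, for illustration only */
    struct chcr_ctr_block {
            u8     salt[4];   /* implicit nonce from the key context */
            __be64 iv;        /* per-record explicit IV (iv + record_no) */
            __be32 counter;   /* 2 + blocks of this record already sent */
    };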
+
+/*
+ * chcr_ktls_tx_plaintxt: This handler will take care of the records which have
+ * only plain text (only tls header and iv).
+ * @tx_info - driver specific tls info.
+ * @skb - skb containing a partial record.
+ * @tcp_seq - tcp sequence number.
+ * @mss - segment size.
+ * @tcp_push - tcp push bit.
+ * @q - TX queue.
+ * @port_id - port number.
+ * @prior_data - data before the current segment, required to make this record
+ * 16 byte aligned.
+ * @prior_data_len - prior_data length (less than 16)
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb, u32 tcp_seq, u32 mss,
+ bool tcp_push, struct sge_eth_txq *q,
+ u32 port_id, u8 *prior_data,
+ u32 prior_data_len)
+{
+ int credits, left, len16, last_desc;
+ unsigned int flits = 0, ndesc;
+ struct tx_sw_desc *sgl_sdesc;
+ struct cpl_tx_data *tx_data;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *ulptx;
+ struct fw_ulptx_wr *wr;
+ u32 wr_mid = 0;
+ void *pos;
+ u64 *end;
+
+ flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
+ flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags);
+ if (prior_data_len)
+ flits += 2;
+ /* WR will need len16 */
+ len16 = DIV_ROUND_UP(flits, 2);
+ /* check how many descriptors needed */
+ ndesc = DIV_ROUND_UP(flits, 8);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
+ sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ /* FW_ULPTX_WR */
+ wr = pos;
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->cookie = 0;
+ pos += sizeof(*wr);
+ /* ULP_TXPKT */
+ ulptx = (struct ulp_txpkt *)(wr + 1);
+ ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_DATAMODIFY_V(0) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_DEST_V(0) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) | ULP_TXPKT_RO_V(1));
+ ulptx->len = htonl(len16 - 1);
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(ulptx + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
+ idata->len = htonl(sizeof(*tx_data) + prior_data_len);
+ /* CPL_TX_DATA */
+ tx_data = (struct cpl_tx_data *)(idata + 1);
+ OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) |
+ TX_LENGTH_V(skb->data_len + prior_data_len));
+ /* set tcp seq number */
+ tx_data->rsvd = htonl(tcp_seq);
+ tx_data->flags = htonl(TX_BYPASS_F);
+ if (tcp_push)
+ tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
+
+ pos = tx_data + 1;
+ /* apart from prior_data_len, the remaining part of the 16 bytes
+ * should be set to zero.
+ */
+ if (prior_data_len)
+ pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
+
+ /* check left again, it might go beyond queue limit */
+ left = (void *)q->q.stat - pos;
+
+ /* check the position again */
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+ /* send the complete packet including the header */
+ cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
+ sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ return 0;
+}
+
+/*
+ * chcr_ktls_copy_record_in_skb
+ * @nskb - new skb to which the frags will be added.
+ * @record - tls record whose complete (up to 16k) data is held in frags.
+ */
+static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
+ struct tls_record_info *record)
+{
+ int i = 0;
+
+ for (i = 0; i < record->num_frags; i++) {
+ skb_shinfo(nskb)->frags[i] = record->frags[i];
+ /* increase the frag ref count */
+ __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
+ }
+
+ skb_shinfo(nskb)->nr_frags = record->num_frags;
+ nskb->data_len = record->len;
+ nskb->len += record->len;
+ nskb->truesize += record->len;
+}
+
+/*
+ * chcr_ktls_update_snd_una: Reset SND_UNA to avoid sending the same segment
+ * again. Any segment before the current tx max will be discarded.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * return: NET_TX_OK/NET_XMIT_DROP.
+ */
+static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q)
+{
+ struct fw_ulptx_wr *wr;
+ unsigned int ndesc;
+ int credits;
+ void *pos;
+ u32 len;
+
+ len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+ ndesc = DIV_ROUND_UP(len, 64);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+
+ wr = pos;
+ /* ULPTX wr */
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->cookie = 0;
+ /* fill len in wr field */
+ wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+
+ pos += sizeof(*wr);
+
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_SND_UNA_RAW_W,
+ TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
+ TCB_SND_UNA_RAW_V(0), 0);
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+
+ return 0;
+}
+
+/*
+ * chcr_end_part_handler: This handler will handle the record which
+ * is complete or whose end part is received. The T6 adapter has an issue in
+ * that it can't send out the TAG with a partial record, so if this is an end
+ * part we have to send the TAG as well, for which we need to fetch the
+ * complete record and send it to the crypto module.
+ * @tx_info - driver specific tls info.
+ * @skb - skb contains partial record.
+ * @record - complete record of 16K size.
+ * @tcp_seq - tcp sequence number.
+ * @mss - segment size in which TP needs to chop a packet.
+ * @tcp_push_no_fin - tcp push if fin is not set.
+ * @q - TX queue.
+ * @tls_end_offset - offset from end of the record.
+ * @last_wr - true if this is the last part of the skb going out.
+ * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
+ */
+static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb,
+ struct tls_record_info *record,
+ u32 tcp_seq, int mss, bool tcp_push_no_fin,
+ struct sge_eth_txq *q,
+ u32 tls_end_offset, bool last_wr)
+{
+ struct sk_buff *nskb = NULL;
+ /* check if it is a complete record */
+ if (tls_end_offset == record->len) {
+ nskb = skb;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_complete_pkts);
+ } else {
+ dev_kfree_skb_any(skb);
+
+ nskb = alloc_skb(0, GFP_KERNEL);
+ if (!nskb)
+ return NETDEV_TX_BUSY;
+ /* copy complete record in skb */
+ chcr_ktls_copy_record_in_skb(nskb, record);
+ /* packet is being sent from the beginning, update the tcp_seq
+ * accordingly.
+ */
+ tcp_seq = tls_record_start_seq(record);
+ /* reset snd una, so the middle record won't send the already
+ * sent part.
+ */
+ if (chcr_ktls_update_snd_una(tx_info, q))
+ goto out;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_end_pkts);
+ }
+
+ if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
+ (last_wr && tcp_push_no_fin),
+ mss)) {
+ goto out;
+ }
+ return 0;
+out:
+ dev_kfree_skb_any(nskb);
+ return NETDEV_TX_BUSY;
+}
+
+/*
+ * chcr_short_record_handler: This handler will take care of the records which
+ * don't have the end part (the 1st part or the middle part(s) of a record). In
+ * such cases, AES CTR is used in place of AES GCM to send out the partial
+ * packet. For a middle record we fetch the prior data to make it 16 byte
+ * aligned. If it has a partial tls header or iv then move back to the start of
+ * the tls header. And if it has a partial TAG, remove the complete TAG and
+ * send only the payload.
+ * There is one more possibility: if it gets only a partial header, send that
+ * portion as plaintext.
+ * @tx_info - driver specific tls info.
+ * @skb - skb containing a partial record.
+ * @record - complete record of 16K size.
+ * @tcp_seq - tcp sequence number.
+ * @mss - segment size in which TP needs to chop a packet.
+ * @tcp_push_no_fin - tcp push if fin is not set.
+ * @q - TX queue.
+ * @tls_end_offset - offset from end of the record.
+ * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
+ */
+static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb,
+ struct tls_record_info *record,
+ u32 tcp_seq, int mss, bool tcp_push_no_fin,
+ struct sge_eth_txq *q, u32 tls_end_offset)
+{
+ u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
+ u8 prior_data[16] = {0};
+ u32 prior_data_len = 0;
+ u32 data_len;
+
+ /* check if the skb ends in the middle of the tag/HASH; that can't
+ * be sent as-is, so send only the part of the packet before the HASH.
+ */
+ int remaining_record = tls_end_offset - skb->data_len;
+
+ if (remaining_record > 0 &&
+ remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
+ int trimmed_len = skb->data_len -
+ (TLS_CIPHER_AES_GCM_128_TAG_SIZE - remaining_record);
+ struct sk_buff *tmp_skb = NULL;
+ /* don't process the pkt if it is only a partial tag */
+ if (skb->data_len < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ goto out;
+
+ WARN_ON(trimmed_len > skb->data_len);
+
+ /* shift that many bytes into a new skb */
+ tmp_skb = alloc_skb(0, GFP_KERNEL);
+ if (unlikely(!tmp_skb))
+ goto out;
+
+ chcr_ktls_skb_shift(tmp_skb, skb, trimmed_len);
+ /* free the last trimmed portion */
+ dev_kfree_skb_any(skb);
+ skb = tmp_skb;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_trimmed_pkts);
+ }
+ data_len = skb->data_len;
+ /* check if the middle record's start point is 16 byte aligned. CTR
+ * needs 16 byte aligned start point to start encryption.
+ */
+ if (tls_rec_offset) {
+ /* there is an offset from the start, which means it's a middle record */
+ int remaining = 0;
+
+ if (tls_rec_offset < (TLS_HEADER_SIZE + tx_info->iv_size)) {
+ prior_data_len = tls_rec_offset;
+ tls_rec_offset = 0;
+ remaining = 0;
+ } else {
+ prior_data_len =
+ (tls_rec_offset -
+ (TLS_HEADER_SIZE + tx_info->iv_size))
+ % AES_BLOCK_LEN;
+ remaining = tls_rec_offset - prior_data_len;
+ }
+
+ /* a non-zero prior_data_len means we need to fetch prior
+ * data, either to make this record 16 byte aligned or to get
+ * back to the start of the record.
+ */
+ if (prior_data_len) {
+ int i = 0;
+ u8 *data = NULL;
+ skb_frag_t *f;
+ u8 *vaddr;
+ int frag_size = 0, frag_delta = 0;
+
+ while (remaining > 0) {
+ frag_size = skb_frag_size(&record->frags[i]);
+ if (remaining < frag_size)
+ break;
+
+ remaining -= frag_size;
+ i++;
+ }
+ f = &record->frags[i];
+ vaddr = kmap_atomic(skb_frag_page(f));
+
+ data = vaddr + skb_frag_off(f) + remaining;
+ frag_delta = skb_frag_size(f) - remaining;
+
+ if (frag_delta >= prior_data_len) {
+ memcpy(prior_data, data, prior_data_len);
+ kunmap_atomic(vaddr);
+ } else {
+ memcpy(prior_data, data, frag_delta);
+ kunmap_atomic(vaddr);
+ /* get the next page */
+ f = &record->frags[i + 1];
+ vaddr = kmap_atomic(skb_frag_page(f));
+ data = vaddr + skb_frag_off(f);
+ memcpy(prior_data + frag_delta,
+ data, (prior_data_len - frag_delta));
+ kunmap_atomic(vaddr);
+ }
+ /* move tcp_seq back by the prior data length */
+ tcp_seq -= prior_data_len;
+ /* include prior_data_len in further calculations */
+ data_len += prior_data_len;
+ }
+ /* reset snd una, so the middle record won't send the already
+ * sent part.
+ */
+ if (chcr_ktls_update_snd_una(tx_info, q))
+ goto out;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_middle_pkts);
+ } else {
+ /* Otherwise it's the partial first part of the record. If it's
+ * only the header, there's no need to send it for encryption.
+ */
+ if (data_len <= TLS_HEADER_SIZE + tx_info->iv_size) {
+ if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ tcp_push_no_fin, q,
+ tx_info->port_id,
+ prior_data,
+ prior_data_len)) {
+ goto out;
+ }
+ return 0;
+ }
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_start_pkts);
+ }
+
+ if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
+ mss, tls_rec_offset, prior_data,
+ prior_data_len)) {
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_BUSY;
+}
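
A quick numeric check of the alignment math above, assuming the usual
AES-GCM-128 sizes (TLS_HEADER_SIZE = 5, iv_size = 8, AES_BLOCK_LEN = 16;
the offset value is made up for illustration):

    /* tls_rec_offset = 100:
     *   prior_data_len = (100 - (5 + 8)) % 16 = 87 % 16 = 7
     *   remaining      = 100 - 7 = 93
     * so 7 bytes of prior data are fetched from the record frags,
     * tcp_seq moves back by 7, and AES CTR starts on a 16 byte
     * boundary of the record payload.
     */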
+
+/* nic tls TX handler */
+int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct tcphdr *th = tcp_hdr(skb);
+ int data_len, qidx, ret = 0, mss;
+ struct tls_record_info *record;
+ struct chcr_stats_debug *stats;
+ struct chcr_ktls_info *tx_info;
+ u32 tls_end_offset, tcp_seq;
+ struct tls_context *tls_ctx;
+ struct sk_buff *local_skb;
+ int new_connection_state;
+ struct sge_eth_txq *q;
+ struct adapter *adap;
+ unsigned long flags;
+
+ tcp_seq = ntohl(th->seq);
+
+ mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : skb->data_len;
+
+ /* skip if this socket is not set up for ktls offload */
+ if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+ goto out;
+
+ tls_ctx = tls_get_ctx(skb->sk);
+ if (unlikely(tls_ctx->netdev != dev))
+ goto out;
+
+ tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+ tx_info = tx_ctx->chcr_info;
+
+ if (unlikely(!tx_info))
+ goto out;
+
+ /* check the connection state, we don't need to pass new connection
+ * state, state machine will check and update the new state if it is
+ * stuck due to responses not received from HW.
+ * Start the tx handling only if state is KTLS_CONN_TX_READY.
+ */
+ new_connection_state = chcr_ktls_update_connection_state(tx_info, 0);
+ if (new_connection_state != KTLS_CONN_TX_READY)
+ goto out;
+
+ /* don't touch the original skb, make a new skb to extract each record
+ * and send them separately.
+ */
+ local_skb = alloc_skb(0, GFP_KERNEL);
+
+ if (unlikely(!local_skb))
+ return NETDEV_TX_BUSY;
+
+ adap = tx_info->adap;
+ stats = &adap->chcr_stats;
+
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+ /* if tcp options are set but fin is not, send the options first */
+ if (!th->fin && chcr_ktls_check_tcp_options(th)) {
+ ret = chcr_ktls_write_tcp_options(tx_info, skb, q,
+ tx_info->tx_chan);
+ if (ret)
+ return NETDEV_TX_BUSY;
+ }
+ /* update tcb */
+ ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
+ ntohl(th->ack_seq),
+ ntohs(th->window));
+ if (ret) {
+ dev_kfree_skb_any(local_skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* copy skb contents into local skb */
+ chcr_ktls_skb_copy(skb, local_skb);
+
+ /* go through the skb and send only one record at a time. */
+ data_len = skb->data_len;
+ /* TCP segments can be received either complete or partial.
+ * chcr_end_part_handler will handle the cases where a complete record
+ * or the end part of a record is received. In case only a partial end
+ * part of a record is received, we will send the complete record again.
+ */
+
+ do {
+ int i;
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+ /* lock taken */
+ spin_lock_irqsave(&tx_ctx->base.lock, flags);
+ /* fetch the tls record */
+ record = tls_get_record(&tx_ctx->base, tcp_seq,
+ &tx_info->record_no);
+ /* By the time the packet reached us, the ACK may already have been
+ * received and the record deleted; handle that case gracefully.
+ */
+ if (unlikely(!record)) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ atomic64_inc(&stats->ktls_tx_drop_no_sync_data);
+ goto out;
+ }
+
+ if (unlikely(tls_record_is_start_marker(record))) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ atomic64_inc(&stats->ktls_tx_skip_no_sync_data);
+ goto out;
+ }
+
+ /* take a reference on each page of the record, so that the
+ * pages can't be freed from under us if the stack receives an
+ * ACK and tries to delete the record meanwhile.
+ */
+ for (i = 0; i < record->num_frags; i++)
+ __skb_frag_ref(&record->frags[i]);
+ /* lock cleared */
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+
+ tls_end_offset = record->end_seq - tcp_seq;
+
+ pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
+ tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
+ /* if a tls record is finishing in this SKB */
+ if (tls_end_offset <= data_len) {
+ struct sk_buff *nskb = NULL;
+
+ if (tls_end_offset < data_len) {
+ nskb = alloc_skb(0, GFP_KERNEL);
+ if (unlikely(!nskb)) {
+ ret = -ENOMEM;
+ goto clear_ref;
+ }
+
+ chcr_ktls_skb_shift(nskb, local_skb,
+ tls_end_offset);
+ } else {
+ /* it's the only record in this skb, point to
+ * it directly.
+ */
+ nskb = local_skb;
+ }
+ ret = chcr_end_part_handler(tx_info, nskb, record,
+ tcp_seq, mss,
+ (!th->fin && th->psh), q,
+ tls_end_offset,
+ (nskb == local_skb));
+
+ if (ret && nskb != local_skb)
+ dev_kfree_skb_any(local_skb);
+
+ data_len -= tls_end_offset;
+ /* advance tcp_seq to handle the next record */
+ tcp_seq += tls_end_offset;
+ } else {
+ ret = chcr_short_record_handler(tx_info, local_skb,
+ record, tcp_seq, mss,
+ (!th->fin && th->psh),
+ q, tls_end_offset);
+ data_len = 0;
+ }
+clear_ref:
+ /* clear the frag ref count which increased locally before */
+ for (i = 0; i < record->num_frags; i++) {
+ /* clear the frag ref count */
+ __skb_frag_unref(&record->frags[i]);
+ }
+ /* on any failure, break out of the loop */
+ if (ret)
+ goto out;
+ /* length should never be less than 0 */
+ WARN_ON(data_len < 0);
+
+ } while (data_len > 0);
+
+ tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
+
+ atomic64_inc(&stats->ktls_tx_encrypted_packets);
+ atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes);
+
+ /* tcp finish is set, send a separate tcp msg including all the options
+ * as well.
+ */
+ if (th->fin)
+ chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
+
+out:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
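
Stepping back, a sketch of the TX flow chcr_ktls_xmit() implements,
derived from the code above (illustrative summary, not driver code):

    /*
     *  chcr_ktls_xmit()
     *    -> write tcp options WR (if options present and fin not set)
     *    -> update TCB (tx_max / snd_una / rcv_nxt / rcv_wnd)
     *    -> per tls record covered by the skb:
     *         complete record or end part -> chcr_end_part_handler()
     *         start or middle part        -> chcr_short_record_handler()
     *    -> if fin is set, write the tcp options WR at the end
     */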
diff --git a/drivers/crypto/chelsio/chcr_ktls.h b/drivers/crypto/chelsio/chcr_ktls.h
new file mode 100644
index 000000000000..5a7ae2ca446e
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ktls.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
+
+#ifndef __CHCR_KTLS_H__
+#define __CHCR_KTLS_H__
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#include <net/tls.h>
+#include "cxgb4.h"
+#include "t4_msg.h"
+#include "t4_tcb.h"
+#include "l2t.h"
+#include "chcr_common.h"
+#include "cxgb4_uld.h"
+
+#define CHCR_TCB_STATE_CLOSED 0
+#define CHCR_KTLS_KEY_CTX_LEN 16
+#define CHCR_SET_TCB_FIELD_LEN sizeof(struct cpl_set_tcb_field)
+#define CHCR_PLAIN_TX_DATA_LEN (sizeof(struct fw_ulptx_wr) +\
+ sizeof(struct ulp_txpkt) +\
+ sizeof(struct ulptx_idata) +\
+ sizeof(struct cpl_tx_data))
+
+#define CHCR_KTLS_WR_SIZE (CHCR_PLAIN_TX_DATA_LEN +\
+ sizeof(struct cpl_tx_sec_pdu))
+
+enum chcr_ktls_conn_state {
+ KTLS_CONN_CLOSED,
+ KTLS_CONN_ACT_OPEN_REQ,
+ KTLS_CONN_ACT_OPEN_RPL,
+ KTLS_CONN_SET_TCB_REQ,
+ KTLS_CONN_SET_TCB_RPL,
+ KTLS_CONN_TX_READY,
+};
+
+struct chcr_ktls_info {
+ struct sock *sk;
+ spinlock_t lock; /* state machine lock */
+ struct ktls_key_ctx key_ctx;
+ struct adapter *adap;
+ struct l2t_entry *l2te;
+ struct net_device *netdev;
+ u64 iv;
+ u64 record_no;
+ int tid;
+ int atid;
+ int rx_qid;
+ u32 iv_size;
+ u32 prev_seq;
+ u32 prev_ack;
+ u32 salt_size;
+ u32 key_ctx_len;
+ u32 scmd0_seqno_numivs;
+ u32 scmd0_ivgen_hdrlen;
+ u32 tcp_start_seq_number;
+ u32 scmd0_short_seqno_numivs;
+ u32 scmd0_short_ivgen_hdrlen;
+ enum chcr_ktls_conn_state connection_state;
+ u16 prev_win;
+ u8 tx_chan;
+ u8 smt_idx;
+ u8 port_id;
+ u8 ip_family;
+ u8 first_qset;
+};
+
+struct chcr_ktls_ofld_ctx_tx {
+ struct tls_offload_context_tx base;
+ struct chcr_ktls_info *chcr_info;
+};
+
+static inline struct chcr_ktls_ofld_ctx_tx *
+chcr_get_ktls_tx_context(struct tls_context *tls_ctx)
+{
+ BUILD_BUG_ON(sizeof(struct chcr_ktls_ofld_ctx_tx) >
+ TLS_OFFLOAD_CONTEXT_SIZE_TX);
+ return container_of(tls_offload_ctx_tx(tls_ctx),
+ struct chcr_ktls_ofld_ctx_tx,
+ base);
+}
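
The helper above is the usual embed-and-recover pattern: the driver's
private TX context embeds the generic offload context, so container_of()
can walk back from the generic pointer. A minimal generic sketch
(hypothetical names, not driver code):

    #include <linux/kernel.h>

    struct base_ctx {
            int state;
    };

    struct wrapper_ctx {
            struct base_ctx base;   /* embedded generic context */
            void *priv;             /* driver private data */
    };

    static inline struct wrapper_ctx *to_wrapper(struct base_ctx *b)
    {
            /* recover the enclosing struct from its member pointer */
            return container_of(b, struct wrapper_ctx, base);
    }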
+
+static inline int chcr_get_first_rx_qid(struct adapter *adap)
+{
+ /* u_ctx is saved in adap, fetch it */
+ struct uld_ctx *u_ctx = adap->uld[CXGB4_ULD_CRYPTO].handle;
+
+ if (!u_ctx)
+ return -1;
+ return u_ctx->lldi.rxq_ids[0];
+}
+
+void chcr_enable_ktls(struct adapter *adap);
+void chcr_disable_ktls(struct adapter *adap);
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#endif /* __CHCR_KTLS_H__ */
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index cceee8bc3c2f..7dcf2093e531 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -738,7 +738,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
{
struct devfreq *devfreq;
struct devfreq_governor *governor;
- static atomic_t devfreq_no = ATOMIC_INIT(-1);
int err = 0;
if (!dev || !profile || !governor_name) {
@@ -800,8 +799,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
atomic_set(&devfreq->suspend_count, 0);
- dev_set_name(&devfreq->dev, "devfreq%d",
- atomic_inc_return(&devfreq_no));
+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
err = device_register(&devfreq->dev);
if (err) {
mutex_unlock(&devfreq->lock);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index d4097856c86b..c343c7c10b4c 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -108,6 +108,7 @@ static int dma_buf_release(struct inode *inode, struct file *file)
dma_resv_fini(dmabuf->resv);
module_put(dmabuf->owner);
+ kfree(dmabuf->name);
kfree(dmabuf);
return 0;
}
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e51d836afcc7..1092d4ce723e 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1947,8 +1947,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
return;
}
- spin_lock(&cohc->lock);
-
/*
* When we reach this point, at least one queue item
* should have been moved over from cohc->queue to
@@ -1969,8 +1967,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
if (coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
- spin_unlock(&cohc->lock);
-
/*
* This tasklet will remove items from cohc->active
* and thus terminates them.
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 1d7347825b95..df47be612ebb 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -204,6 +204,7 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
rc = minor;
+ kfree(dev);
goto ida_err;
}
@@ -212,7 +213,6 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
rc = device_register(dev);
if (rc < 0) {
dev_err(&idxd->pdev->dev, "device register failed\n");
- put_device(dev);
goto dev_reg_err;
}
idxd_cdev->minor = minor;
@@ -221,8 +221,8 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
dev_reg_err:
ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+ put_device(dev);
ida_err:
- kfree(dev);
idxd_cdev->dev = NULL;
return rc;
}
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 6d907fe150aa..6ca6e520a2fa 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -124,6 +124,7 @@ static int idxd_config_bus_probe(struct device *dev)
rc = idxd_device_config(idxd);
if (rc < 0) {
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ module_put(THIS_MODULE);
dev_warn(dev, "Device config failed: %d\n", rc);
return rc;
}
@@ -132,6 +133,7 @@ static int idxd_config_bus_probe(struct device *dev)
rc = idxd_device_enable(idxd);
if (rc < 0) {
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ module_put(THIS_MODULE);
dev_warn(dev, "Device enable failed: %d\n", rc);
return rc;
}
@@ -142,6 +144,7 @@ static int idxd_config_bus_probe(struct device *dev)
rc = idxd_register_dma_device(idxd);
if (rc < 0) {
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ module_put(THIS_MODULE);
dev_dbg(dev, "Failed to register dmaengine device\n");
return rc;
}
@@ -516,7 +519,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
if (val > idxd->max_tokens)
return -EINVAL;
- if (val > idxd->nr_tokens)
+ if (val > idxd->nr_tokens + group->tokens_reserved)
return -EINVAL;
group->tokens_reserved = val;
@@ -901,6 +904,20 @@ static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", wq->size);
}
+static int total_claimed_wq_size(struct idxd_device *idxd)
+{
+ int i;
+ int wq_size = 0;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ wq_size += wq->size;
+ }
+
+ return wq_size;
+}
+
static ssize_t wq_size_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -920,7 +937,7 @@ static ssize_t wq_size_store(struct device *dev,
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
- if (size > idxd->max_wq_size)
+ if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
return -EINVAL;
wq->size = size;
@@ -999,12 +1016,14 @@ static ssize_t wq_type_store(struct device *dev,
return -EPERM;
old_type = wq->type;
- if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
+ if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
+ wq->type = IDXD_WQT_NONE;
+ else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
wq->type = IDXD_WQT_KERNEL;
else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
wq->type = IDXD_WQT_USER;
else
- wq->type = IDXD_WQT_NONE;
+ return -EINVAL;
/* If we are changing queue type, clear the name */
if (wq->type != old_type)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 066b21a32232..4d4477df4ede 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1331,13 +1331,14 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
sdma_channel_synchronize(chan);
- if (sdmac->event_id0)
+ if (sdmac->event_id0 >= 0)
sdma_event_disable(sdmac, sdmac->event_id0);
if (sdmac->event_id1)
sdma_event_disable(sdmac, sdmac->event_id1);
sdmac->event_id0 = 0;
sdmac->event_id1 = 0;
+ sdmac->context_loaded = false;
sdma_set_channel_priority(sdmac, 0);
@@ -1631,7 +1632,7 @@ static int sdma_config(struct dma_chan *chan,
memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
/* Set ENBLn earlier to make sure dma request triggered after that */
- if (sdmac->event_id0) {
+ if (sdmac->event_id0 >= 0) {
if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
return -EINVAL;
sdma_event_enable(sdmac, sdmac->event_id0);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 3a45079d11ec..4a750e29bfb5 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -281,7 +281,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
/* Do not allocate if desc are waiting for ack */
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
- if (async_tx_test_ack(&dma_desc->txd)) {
+ if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
list_del(&dma_desc->node);
spin_unlock_irqrestore(&tdc->lock, flags);
dma_desc->txd.flags = 0;
@@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
bool was_busy;
spin_lock_irqsave(&tdc->lock, flags);
- if (list_empty(&tdc->pending_sg_req)) {
- spin_unlock_irqrestore(&tdc->lock, flags);
- return 0;
- }
if (!tdc->busy)
goto skip_dma_stop;
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index ea79c2df28e0..0536866a58ce 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -5,6 +5,7 @@
*/
#include <linux/kernel.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
@@ -96,6 +97,24 @@ struct udma_match_data {
u32 level_start_idx[];
};
+struct udma_hwdesc {
+ size_t cppi5_desc_size;
+ void *cppi5_desc_vaddr;
+ dma_addr_t cppi5_desc_paddr;
+
+ /* TR descriptor internal pointers */
+ void *tr_req_base;
+ struct cppi5_tr_resp_t *tr_resp_base;
+};
+
+struct udma_rx_flush {
+ struct udma_hwdesc hwdescs[2];
+
+ size_t buffer_size;
+ void *buffer_vaddr;
+ dma_addr_t buffer_paddr;
+};
+
struct udma_dev {
struct dma_device ddev;
struct device *dev;
@@ -112,6 +131,8 @@ struct udma_dev {
struct list_head desc_to_purge;
spinlock_t lock;
+ struct udma_rx_flush rx_flush;
+
int tchan_cnt;
int echan_cnt;
int rchan_cnt;
@@ -130,16 +151,6 @@ struct udma_dev {
u32 psil_base;
};
-struct udma_hwdesc {
- size_t cppi5_desc_size;
- void *cppi5_desc_vaddr;
- dma_addr_t cppi5_desc_paddr;
-
- /* TR descriptor internal pointers */
- void *tr_req_base;
- struct cppi5_tr_resp_t *tr_resp_base;
-};
-
struct udma_desc {
struct virt_dma_desc vd;
@@ -169,7 +180,7 @@ enum udma_chan_state {
struct udma_tx_drain {
struct delayed_work work;
- unsigned long jiffie;
+ ktime_t tstamp;
u32 residue;
};
@@ -502,7 +513,7 @@ static bool udma_is_chan_paused(struct udma_chan *uc)
{
u32 val, pause_mask;
- switch (uc->desc->dir) {
+ switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
val = udma_rchanrt_read(uc->rchan,
UDMA_RCHAN_RT_PEER_RT_EN_REG);
@@ -551,12 +562,17 @@ static void udma_sync_for_device(struct udma_chan *uc, int idx)
}
}
+static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
+{
+ return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
+}
+
static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
struct udma_desc *d = uc->desc;
-
struct k3_ring *ring = NULL;
- int ret = -EINVAL;
+ dma_addr_t paddr;
+ int ret;
switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
@@ -567,21 +583,37 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
ring = uc->tchan->t_ring;
break;
default:
- break;
+ return -EINVAL;
}
- if (ring) {
- dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx);
+ /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
+ if (idx == -1) {
+ paddr = udma_get_rx_flush_hwdesc_paddr(uc);
+ } else {
+ paddr = udma_curr_cppi5_desc_paddr(d, idx);
wmb(); /* Ensure that writes are not moved over this point */
udma_sync_for_device(uc, idx);
- ret = k3_ringacc_ring_push(ring, &desc_addr);
- uc->in_ring_cnt++;
}
+ ret = k3_ringacc_ring_push(ring, &paddr);
+ if (!ret)
+ uc->in_ring_cnt++;
+
return ret;
}
+static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
+{
+ if (uc->config.dir != DMA_DEV_TO_MEM)
+ return false;
+
+ if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
+ return true;
+
+ return false;
+}
+
static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
struct k3_ring *ring = NULL;
@@ -610,6 +642,10 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
if (cppi5_desc_is_tdcm(*addr))
return ret;
+ /* Check for flush descriptor */
+ if (udma_desc_is_rx_flush(uc, *addr))
+ return -ENOENT;
+
d = udma_udma_desc_from_paddr(uc, *addr);
if (d)
@@ -890,6 +926,9 @@ static int udma_stop(struct udma_chan *uc)
switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
+ if (!uc->cyclic && !uc->desc)
+ udma_push_to_ring(uc, -1);
+
udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
UDMA_PEER_RT_EN_ENABLE |
UDMA_PEER_RT_EN_TEARDOWN);
@@ -946,9 +985,10 @@ static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+ /* Transfer is incomplete, store current residue and time stamp */
if (peer_bcnt < bcnt) {
uc->tx_drain.residue = bcnt - peer_bcnt;
- uc->tx_drain.jiffie = jiffies;
+ uc->tx_drain.tstamp = ktime_get();
return false;
}
@@ -961,35 +1001,59 @@ static void udma_check_tx_completion(struct work_struct *work)
tx_drain.work.work);
bool desc_done = true;
u32 residue_diff;
- unsigned long jiffie_diff, delay;
+ ktime_t time_diff;
+ unsigned long delay;
+
+ while (1) {
+ if (uc->desc) {
+ /* Get previous residue and time stamp */
+ residue_diff = uc->tx_drain.residue;
+ time_diff = uc->tx_drain.tstamp;
+ /*
+ * Get current residue and time stamp or see if
+ * transfer is complete
+ */
+ desc_done = udma_is_desc_really_done(uc, uc->desc);
+ }
- if (uc->desc) {
- residue_diff = uc->tx_drain.residue;
- jiffie_diff = uc->tx_drain.jiffie;
- desc_done = udma_is_desc_really_done(uc, uc->desc);
- }
-
- if (!desc_done) {
- jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
- residue_diff -= uc->tx_drain.residue;
- if (residue_diff) {
- /* Try to guess when we should check next time */
- residue_diff /= jiffie_diff;
- delay = uc->tx_drain.residue / residue_diff / 3;
- if (jiffies_to_msecs(delay) < 5)
- delay = 0;
- } else {
- /* No progress, check again in 1 second */
- delay = HZ;
+ if (!desc_done) {
+ /*
+ * Find the time delta and residue delta w.r.t
+ * previous poll
+ */
+ time_diff = ktime_sub(uc->tx_drain.tstamp,
+ time_diff) + 1;
+ residue_diff -= uc->tx_drain.residue;
+ if (residue_diff) {
+ /*
+ * Try to guess when we should check
+ * next time by calculating rate at
+ * which data is being drained at the
+ * peer device
+ */
+ delay = (time_diff / residue_diff) *
+ uc->tx_drain.residue;
+ } else {
+ /* No progress, check again in 1 second */
+ schedule_delayed_work(&uc->tx_drain.work, HZ);
+ break;
+ }
+
+ usleep_range(ktime_to_us(delay),
+ ktime_to_us(delay) + 10);
+ continue;
}
- schedule_delayed_work(&uc->tx_drain.work, delay);
- } else if (uc->desc) {
- struct udma_desc *d = uc->desc;
+ if (uc->desc) {
+ struct udma_desc *d = uc->desc;
- uc->bcnt += d->residue;
- udma_start(uc);
- vchan_cookie_complete(&d->vd);
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ vchan_cookie_complete(&d->vd);
+ break;
+ }
+
+ break;
}
}
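
The loop above guesses the next poll time from the observed drain rate: nanoseconds
per byte since the last poll, multiplied by the bytes still queued. The arithmetic in
isolation, as a hedged userspace sketch with nanosecond units assumed:

#include <stdio.h>
#include <stdint.h>

/* Given residue_diff bytes drained over time_diff_ns, guess how long the
 * remaining residue will take; fall back to 1 s when nothing moved. */
uint64_t estimate_drain_ns(uint64_t time_diff_ns, uint64_t residue_diff,
			   uint64_t residue)
{
	if (!residue_diff)
		return 1000000000ULL;

	return (time_diff_ns / residue_diff) * residue;
}

int main(void)
{
	/* 4096 bytes drained in 2 ms, 8192 bytes left -> roughly 4 ms */
	printf("%llu ns\n",
	       (unsigned long long)estimate_drain_ns(2000000, 4096, 8192));
	return 0;
}
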
@@ -1033,29 +1097,27 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
goto out;
}
- if (uc->cyclic) {
- /* push the descriptor back to the ring */
- if (d == uc->desc) {
+ if (d == uc->desc) {
+ /* active descriptor */
+ if (uc->cyclic) {
udma_cyclic_packet_elapsed(uc);
vchan_cyclic_callback(&d->vd);
- }
- } else {
- bool desc_done = false;
-
- if (d == uc->desc) {
- desc_done = udma_is_desc_really_done(uc, d);
-
- if (desc_done) {
+ } else {
+ if (udma_is_desc_really_done(uc, d)) {
uc->bcnt += d->residue;
udma_start(uc);
+ vchan_cookie_complete(&d->vd);
} else {
schedule_delayed_work(&uc->tx_drain.work,
0);
}
}
-
- if (desc_done)
- vchan_cookie_complete(&d->vd);
+ } else {
+ /*
+ * terminated descriptor, mark the descriptor as
+ * completed to update the channel's cookie marker
+ */
+ dma_cookie_complete(&d->vd.tx);
}
}
out:
@@ -1965,36 +2027,81 @@ static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
return d;
}
+/**
+ * udma_get_tr_counters - calculate TR counters for a given length
+ * @len: Length of the transfer
+ * @align_to: Preferred alignment
+ * @tr0_cnt0: First TR icnt0
+ * @tr0_cnt1: First TR icnt1
+ * @tr1_cnt0: Second (if used) TR icnt0
+ *
+ * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
+ * For len >= SZ_64K two TRs are used in a simple way:
+ * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
+ * Second TR: the remaining length (tr1_cnt0)
+ *
+ * Returns the number of TRs the length needs (1 or 2)
+ * -EINVAL if the length cannot be supported
+ */
+static int udma_get_tr_counters(size_t len, unsigned long align_to,
+ u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
+{
+ if (len < SZ_64K) {
+ *tr0_cnt0 = len;
+ *tr0_cnt1 = 1;
+
+ return 1;
+ }
+
+ if (align_to > 3)
+ align_to = 3;
+
+realign:
+ *tr0_cnt0 = SZ_64K - BIT(align_to);
+ if (len / *tr0_cnt0 >= SZ_64K) {
+ if (align_to) {
+ align_to--;
+ goto realign;
+ }
+ return -EINVAL;
+ }
+
+ *tr0_cnt1 = len / *tr0_cnt0;
+ *tr1_cnt0 = len % *tr0_cnt0;
+
+ return 2;
+}
+
static struct udma_desc *
udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
unsigned int sglen, enum dma_transfer_direction dir,
unsigned long tx_flags, void *context)
{
- enum dma_slave_buswidth dev_width;
struct scatterlist *sgent;
struct udma_desc *d;
- size_t tr_size;
struct cppi5_tr_type1_t *tr_req = NULL;
+ u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
unsigned int i;
- u32 burst;
+ size_t tr_size;
+ int num_tr = 0;
+ int tr_idx = 0;
- if (dir == DMA_DEV_TO_MEM) {
- dev_width = uc->cfg.src_addr_width;
- burst = uc->cfg.src_maxburst;
- } else if (dir == DMA_MEM_TO_DEV) {
- dev_width = uc->cfg.dst_addr_width;
- burst = uc->cfg.dst_maxburst;
- } else {
- dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ if (!is_slave_direction(dir)) {
+ dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
return NULL;
}
- if (!burst)
- burst = 1;
+ /* estimate the number of TRs we will need */
+ for_each_sg(sgl, sgent, sglen, i) {
+ if (sg_dma_len(sgent) < SZ_64K)
+ num_tr++;
+ else
+ num_tr += 2;
+ }
/* Now allocate and setup the descriptor. */
tr_size = sizeof(struct cppi5_tr_type1_t);
- d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+ d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
if (!d)
return NULL;
@@ -2002,19 +2109,46 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
tr_req = d->hwdesc[0].tr_req_base;
for_each_sg(sgl, sgent, sglen, i) {
- d->residue += sg_dma_len(sgent);
+ dma_addr_t sg_addr = sg_dma_address(sgent);
+
+ num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
+ &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
+ if (num_tr < 0) {
+ dev_err(uc->ud->dev, "size %u is not supported\n",
+ sg_dma_len(sgent));
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
- tr_req[i].addr = sg_dma_address(sgent);
- tr_req[i].icnt0 = burst * dev_width;
- tr_req[i].dim1 = burst * dev_width;
- tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+ tr_req[tr_idx].addr = sg_addr;
+ tr_req[tr_idx].icnt0 = tr0_cnt0;
+ tr_req[tr_idx].icnt1 = tr0_cnt1;
+ tr_req[tr_idx].dim1 = tr0_cnt0;
+ tr_idx++;
+
+ if (num_tr == 2) {
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
+ false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
+ tr_req[tr_idx].icnt0 = tr1_cnt0;
+ tr_req[tr_idx].icnt1 = 1;
+ tr_req[tr_idx].dim1 = tr1_cnt0;
+ tr_idx++;
+ }
+
+ d->residue += sg_dma_len(sgent);
}
- cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+ cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP);
return d;
}
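
The sg path above leans entirely on udma_get_tr_counters(), whose splitting rule
reduces to a few lines of integer arithmetic. A minimal standalone sketch, with the
constants and function name redefined locally so it compiles outside the kernel:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SZ_64K	0x10000UL
#define BIT(n)	(1UL << (n))

/* Mirror of udma_get_tr_counters(): split a length into at most two TRs. */
int get_tr_counters(size_t len, unsigned long align_to,
		    uint16_t *tr0_cnt0, uint16_t *tr0_cnt1, uint16_t *tr1_cnt0)
{
	if (len < SZ_64K) {
		*tr0_cnt0 = len;
		*tr0_cnt1 = 1;
		return 1;
	}

	if (align_to > 3)
		align_to = 3;

	for (;;) {
		*tr0_cnt0 = SZ_64K - BIT(align_to);
		if (len / *tr0_cnt0 < SZ_64K)
			break;
		if (!align_to)
			return -1;	/* stands in for -EINVAL */
		align_to--;	/* trade alignment for representable length */
	}

	*tr0_cnt1 = len / *tr0_cnt0;
	*tr1_cnt0 = len % *tr0_cnt0;
	return 2;
}

int main(void)
{
	uint16_t c0, c1, r0;
	int n = get_tr_counters(200000, 3, &c0, &c1, &r0);

	/* 200000 bytes, 8-byte aligned: 3 blocks of 65528 plus 3416 */
	printf("num_tr=%d tr0=%ux%u tr1=%u\n", n,
	       (unsigned)c0, (unsigned)c1, (unsigned)r0);
	return 0;
}
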
@@ -2319,47 +2453,66 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
size_t buf_len, size_t period_len,
enum dma_transfer_direction dir, unsigned long flags)
{
- enum dma_slave_buswidth dev_width;
struct udma_desc *d;
- size_t tr_size;
+ size_t tr_size, period_addr;
struct cppi5_tr_type1_t *tr_req;
- unsigned int i;
unsigned int periods = buf_len / period_len;
- u32 burst;
+ u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+ unsigned int i;
+ int num_tr;
- if (dir == DMA_DEV_TO_MEM) {
- dev_width = uc->cfg.src_addr_width;
- burst = uc->cfg.src_maxburst;
- } else if (dir == DMA_MEM_TO_DEV) {
- dev_width = uc->cfg.dst_addr_width;
- burst = uc->cfg.dst_maxburst;
- } else {
- dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ if (!is_slave_direction(dir)) {
+ dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
return NULL;
}
- if (!burst)
- burst = 1;
+ num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
+ &tr0_cnt1, &tr1_cnt0);
+ if (num_tr < 0) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ period_len);
+ return NULL;
+ }
/* Now allocate and setup the descriptor. */
tr_size = sizeof(struct cppi5_tr_type1_t);
- d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
+ d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
if (!d)
return NULL;
tr_req = d->hwdesc[0].tr_req_base;
+ period_addr = buf_addr;
for (i = 0; i < periods; i++) {
- cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
- CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ int tr_idx = i * num_tr;
- tr_req[i].addr = buf_addr + period_len * i;
- tr_req[i].icnt0 = dev_width;
- tr_req[i].icnt1 = period_len / dev_width;
- tr_req[i].dim1 = dev_width;
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
+ false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+ tr_req[tr_idx].addr = period_addr;
+ tr_req[tr_idx].icnt0 = tr0_cnt0;
+ tr_req[tr_idx].icnt1 = tr0_cnt1;
+ tr_req[tr_idx].dim1 = tr0_cnt0;
+
+ if (num_tr == 2) {
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+ tr_idx++;
+
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
+ false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+ tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
+ tr_req[tr_idx].icnt0 = tr1_cnt0;
+ tr_req[tr_idx].icnt1 = 1;
+ tr_req[tr_idx].dim1 = tr1_cnt0;
+ }
if (!(flags & DMA_PREP_INTERRUPT))
- cppi5_tr_csf_set(&tr_req[i].flags,
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags,
CPPI5_TR_CSF_SUPR_EVT);
+
+ period_addr += period_len;
}
return d;
@@ -2517,29 +2670,12 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
return NULL;
}
- if (len < SZ_64K) {
- num_tr = 1;
- tr0_cnt0 = len;
- tr0_cnt1 = 1;
- } else {
- unsigned long align_to = __ffs(src | dest);
-
- if (align_to > 3)
- align_to = 3;
- /*
- * Keep simple: tr0: SZ_64K-alignment blocks,
- * tr1: the remaining
- */
- num_tr = 2;
- tr0_cnt0 = (SZ_64K - BIT(align_to));
- if (len / tr0_cnt0 >= SZ_64K) {
- dev_err(uc->ud->dev, "size %zu is not supported\n",
- len);
- return NULL;
- }
-
- tr0_cnt1 = len / tr0_cnt0;
- tr1_cnt0 = len % tr0_cnt0;
+ num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
+ &tr0_cnt1, &tr1_cnt0);
+ if (num_tr < 0) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ len);
+ return NULL;
}
d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
@@ -2631,6 +2767,9 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,
ret = dma_cookie_status(chan, cookie, txstate);
+ if (!udma_is_chan_running(uc))
+ ret = DMA_COMPLETE;
+
if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
ret = DMA_PAUSED;
@@ -2697,11 +2836,8 @@ static int udma_pause(struct dma_chan *chan)
{
struct udma_chan *uc = to_udma_chan(chan);
- if (!uc->desc)
- return -EINVAL;
-
/* pause the channel */
- switch (uc->desc->dir) {
+ switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
udma_rchanrt_update_bits(uc->rchan,
UDMA_RCHAN_RT_PEER_RT_EN_REG,
@@ -2730,11 +2866,8 @@ static int udma_resume(struct dma_chan *chan)
{
struct udma_chan *uc = to_udma_chan(chan);
- if (!uc->desc)
- return -EINVAL;
-
/* resume the channel */
- switch (uc->desc->dir) {
+ switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
udma_rchanrt_update_bits(uc->rchan,
UDMA_RCHAN_RT_PEER_RT_EN_REG,
@@ -3248,6 +3381,98 @@ static int udma_setup_resources(struct udma_dev *ud)
return ch_count;
}
+static int udma_setup_rx_flush(struct udma_dev *ud)
+{
+ struct udma_rx_flush *rx_flush = &ud->rx_flush;
+ struct cppi5_desc_hdr_t *tr_desc;
+ struct cppi5_tr_type1_t *tr_req;
+ struct cppi5_host_desc_t *desc;
+ struct device *dev = ud->dev;
+ struct udma_hwdesc *hwdesc;
+ size_t tr_size;
+
+ /* Allocate 1K buffer for discarded data on RX channel teardown */
+ rx_flush->buffer_size = SZ_1K;
+ rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
+ GFP_KERNEL);
+ if (!rx_flush->buffer_vaddr)
+ return -ENOMEM;
+
+ rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
+ rx_flush->buffer_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, rx_flush->buffer_paddr))
+ return -ENOMEM;
+
+ /* Set up descriptor to be used for TR mode */
+ hwdesc = &rx_flush->hwdescs[0];
+ tr_size = sizeof(struct cppi5_tr_type1_t);
+ hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
+ hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
+ ud->desc_align);
+
+ hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
+ GFP_KERNEL);
+ if (!hwdesc->cppi5_desc_vaddr)
+ return -ENOMEM;
+
+ hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
+ hwdesc->cppi5_desc_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
+ return -ENOMEM;
+
+ /* Start of the TR req records */
+ hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
+ /* Start address of the TR response array */
+ hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
+
+ tr_desc = hwdesc->cppi5_desc_vaddr;
+ cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
+ cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(tr_desc, 0, 0);
+
+ tr_req = hwdesc->tr_req_base;
+ cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req->addr = rx_flush->buffer_paddr;
+ tr_req->icnt0 = rx_flush->buffer_size;
+ tr_req->icnt1 = 1;
+
+ /* Set up descriptor to be used for packet mode */
+ hwdesc = &rx_flush->hwdescs[1];
+ hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
+ CPPI5_INFO0_HDESC_EPIB_SIZE +
+ CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
+ ud->desc_align);
+
+ hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
+ GFP_KERNEL);
+ if (!hwdesc->cppi5_desc_vaddr)
+ return -ENOMEM;
+
+ hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
+ hwdesc->cppi5_desc_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
+ return -ENOMEM;
+
+ desc = hwdesc->cppi5_desc_vaddr;
+ cppi5_hdesc_init(desc, 0, 0);
+ cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
+
+ cppi5_hdesc_attach_buf(desc,
+ rx_flush->buffer_paddr, rx_flush->buffer_size,
+ rx_flush->buffer_paddr, rx_flush->buffer_size);
+
+ dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
+ hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
+ return 0;
+}
+
#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
@@ -3361,6 +3586,10 @@ static int udma_probe(struct platform_device *pdev)
if (ud->desc_align < dma_get_cache_alignment())
ud->desc_align = dma_get_cache_alignment();
+ ret = udma_setup_rx_flush(ud);
+ if (ret)
+ return ret;
+
for (i = 0; i < ud->tchan_cnt; i++) {
struct udma_tchan *tchan = &ud->tchans[i];
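
To make the intent of udma_setup_rx_flush()'s two dummy descriptors concrete, here is
a simplified mirror of the paddr lookup and the drop test; the struct layout is
illustrative, only the pkt_mode indexing and the paddr comparison come from the patch:

#include <stdbool.h>

typedef unsigned long long dma_addr_t;

/* One dummy descriptor per channel mode: [0] = TR mode, [1] = packet mode */
struct rx_flush {
	dma_addr_t hwdesc_paddr[2];
};

struct chan_cfg {
	bool pkt_mode;		/* selects which dummy descriptor to push */
};

/* Mirrors udma_get_rx_flush_hwdesc_paddr(): pkt_mode indexes the array */
dma_addr_t rx_flush_paddr(const struct rx_flush *rf,
			  const struct chan_cfg *cfg)
{
	return rf->hwdesc_paddr[cfg->pkt_mode];
}

/* Mirrors udma_desc_is_rx_flush(): a completion matching the dummy
 * descriptor is teardown traffic and must never reach a client */
bool is_rx_flush(const struct rx_flush *rf, const struct chan_cfg *cfg,
		 dma_addr_t paddr)
{
	return paddr == rx_flush_paddr(rf, cfg);
}

int main(void)
{
	struct rx_flush rf = { .hwdesc_paddr = { 0x1000, 0x2000 } };
	struct chan_cfg cfg = { .pkt_mode = true };

	/* a pop returning 0x2000 on this packet-mode channel is dropped */
	return is_rx_flush(&rf, &cfg, 0x2000) ? 0 : 1;
}
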
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 2d263382d797..880ffd833718 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -479,20 +479,14 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
pinf = &p->ceinfo;
if (!priv->p_data->quirks) {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type:%s Row %d Bank %d Col %d ",
- "CE", pinf->row, pinf->bank, pinf->col);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "Bit Position: %d Data: 0x%08x\n",
+ "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
+ "CE", pinf->row, pinf->bank, pinf->col,
pinf->bitpos, pinf->data);
} else {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type:%s Row %d Bank %d Col %d ",
- "CE", pinf->row, pinf->bank, pinf->col);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "BankGroup Number %d Block Number %d ",
- pinf->bankgrpnr, pinf->blknr);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "Bit Position: %d Data: 0x%08x\n",
+ "DDR ECC error type:%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
+ "CE", pinf->row, pinf->bank, pinf->col,
+ pinf->bankgrpnr, pinf->blknr,
pinf->bitpos, pinf->data);
}
@@ -509,10 +503,8 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
"UE", pinf->row, pinf->bank, pinf->col);
} else {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type :%s Row %d Bank %d Col %d ",
- "UE", pinf->row, pinf->bank, pinf->col);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "BankGroup Number %d Block Number %d",
+ "DDR ECC error type :%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d",
+ "UE", pinf->row, pinf->bank, pinf->col,
pinf->bankgrpnr, pinf->blknr);
}
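
The synopsys_edac fix addresses repeated snprintf(buf, ...) calls that each restart at
buf[0], so only the last fragment survived in the reported message. A two-call
demonstration of the bug and the consolidated form:

#include <stdio.h>

int main(void)
{
	char msg[64];

	/* Buggy pattern removed by the patch: the second snprintf starts
	 * at msg[0] again, overwriting the first fragment entirely. */
	snprintf(msg, sizeof(msg), "Row %d Bank %d ", 3, 1);
	snprintf(msg, sizeof(msg), "Bit Position: %d", 7);
	printf("overwritten: %s\n", msg);	/* only "Bit Position: 7" */

	/* Fixed pattern: one format string, one call. */
	snprintf(msg, sizeof(msg), "Row %d Bank %d Bit Position: %d",
		 3, 1, 7);
	printf("combined:    %s\n", msg);
	return 0;
}
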
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 621220ab3d0e..21ea99f65113 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -552,7 +552,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
seed = early_memremap(efi.rng_seed, sizeof(*seed));
if (seed != NULL) {
- size = seed->size;
+ size = READ_ONCE(seed->size);
early_memunmap(seed, sizeof(*seed));
} else {
pr_err("Could not map UEFI random seed!\n");
@@ -562,7 +562,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
sizeof(*seed) + size);
if (seed != NULL) {
pr_notice("seeding entropy pool\n");
- add_bootloader_randomness(seed->bits, seed->size);
+ add_bootloader_randomness(seed->bits, size);
early_memunmap(seed, sizeof(*seed) + size);
} else {
pr_err("Could not map UEFI random seed!\n");
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 03b43b7a6d1d..f71eaa5bf52d 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -29,6 +29,7 @@ struct imx_sc_chan {
struct mbox_client cl;
struct mbox_chan *ch;
int idx;
+ struct completion tx_done;
};
struct imx_sc_ipc {
@@ -100,6 +101,14 @@ int imx_scu_get_handle(struct imx_sc_ipc **ipc)
}
EXPORT_SYMBOL(imx_scu_get_handle);
+/* Callback called when a word of a message is acked, e.g. read by the SCU */
+static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r)
+{
+ struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl);
+
+ complete(&sc_chan->tx_done);
+}
+
static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
{
struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl);
@@ -149,6 +158,19 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
for (i = 0; i < hdr->size; i++) {
sc_chan = &sc_ipc->chans[i % 4];
+
+ /*
+ * SCU requires that all message words are written
+ * sequentially, but the Linux MU driver implements multiple
+ * independent channels for each register, so ordering between
+ * different channels must be ensured by the SCU API interface.
+ *
+ * Wait for tx_done before every send to ensure that no
+ * queueing happens at the mailbox channel level.
+ */
+ wait_for_completion(&sc_chan->tx_done);
+ reinit_completion(&sc_chan->tx_done);
+
ret = mbox_send_message(sc_chan->ch, &data[i]);
if (ret < 0)
return ret;
@@ -247,6 +269,11 @@ static int imx_scu_probe(struct platform_device *pdev)
cl->knows_txdone = true;
cl->rx_callback = imx_scu_rx_callback;
+ /* Initialize the tx_done completion as "done" */
+ cl->tx_done = imx_scu_tx_done;
+ init_completion(&sc_chan->tx_done);
+ complete(&sc_chan->tx_done);
+
sc_chan->sc_ipc = sc_ipc;
sc_chan->idx = i % 4;
sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
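
The imx-scu change serializes mailbox words with a completion that starts out "done",
so the first send never blocks. A minimal userspace stand-in for the three completion
primitives the patch uses, a pthread-based sketch (create each completion with
done = true and an initialized mutex/cond, mirroring the probe path):

#include <pthread.h>
#include <stdbool.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

void reinit_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = false;
	pthread_mutex_unlock(&c->lock);
}

/* Per-word send order, as in imx_scu_ipc_write(): wait for the previous
 * word's ack, re-arm the completion, then hand the next word over. */
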
diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
index 4b56a587dacd..d073cb3ce699 100644
--- a/drivers/firmware/imx/misc.c
+++ b/drivers/firmware/imx/misc.c
@@ -16,7 +16,7 @@ struct imx_sc_msg_req_misc_set_ctrl {
u32 ctrl;
u32 val;
u16 resource;
-} __packed;
+} __packed __aligned(4);
struct imx_sc_msg_req_cpu_start {
struct imx_sc_rpc_msg hdr;
@@ -24,18 +24,18 @@ struct imx_sc_msg_req_cpu_start {
u32 address_lo;
u16 resource;
u8 enable;
-} __packed;
+} __packed __aligned(4);
struct imx_sc_msg_req_misc_get_ctrl {
struct imx_sc_rpc_msg hdr;
u32 ctrl;
u16 resource;
-} __packed;
+} __packed __aligned(4);
struct imx_sc_msg_resp_misc_get_ctrl {
struct imx_sc_rpc_msg hdr;
u32 val;
-} __packed;
+} __packed __aligned(4);
/*
* This function sets a miscellaneous control value.
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index b556612207e5..af3ae0087de4 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -61,7 +61,7 @@ struct imx_sc_msg_req_set_resource_power_mode {
struct imx_sc_rpc_msg hdr;
u16 resource;
u8 mode;
-} __packed;
+} __packed __aligned(4);
#define IMX_SCU_PD_NAME_SIZE 20
struct imx_sc_pm_domain {
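
__packed alone drops a structure's alignment to 1, which can misalign the 4-byte
message words these RPC structs are exchanged as; adding __aligned(4) restores word
alignment while keeping the packed member layout. A small demonstration using the
GCC/Clang attribute spelling:

#include <stdio.h>
#include <stdint.h>

struct msg_packed {
	uint32_t hdr;
	uint32_t ctrl;
	uint16_t resource;
} __attribute__((packed));

struct msg_packed_aligned {
	uint32_t hdr;
	uint32_t ctrl;
	uint16_t resource;
} __attribute__((packed, aligned(4)));

int main(void)
{
	/* packed alone: size 10, alignment 1 -> word accesses may split */
	printf("packed:          size=%zu align=%zu\n",
	       sizeof(struct msg_packed), _Alignof(struct msg_packed));
	/* packed + aligned(4): size pads to 12, alignment back to 4 */
	printf("packed+aligned4: size=%zu align=%zu\n",
	       sizeof(struct msg_packed_aligned),
	       _Alignof(struct msg_packed_aligned));
	return 0;
}
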
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 94e2fd758e01..42f4febe24c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1389,7 +1389,7 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_ATOMIC |
+ DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
DRIVER_SYNCOBJ_TIMELINE,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index d3c27a3c43f6..7546da0cc70c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -195,6 +195,7 @@ struct amdgpu_gmc {
uint32_t srbm_soft_reset;
bool prt_warning;
uint64_t stolen_size;
+ uint32_t sdpif_register;
/* apertures */
u64 shared_aperture_start;
u64 shared_aperture_end;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 22bbb36c768e..02702597ddeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -52,7 +52,7 @@
* 1. Primary ring
* 2. Async ring
*/
-#define GFX10_NUM_GFX_RINGS 2
+#define GFX10_NUM_GFX_RINGS_NV1X 1
#define GFX10_MEC_HPD_SIZE 2048
#define F32_CE_PROGRAM_RAM_SIZE 65536
@@ -1304,7 +1304,7 @@ static int gfx_v10_0_sw_init(void *handle)
case CHIP_NAVI14:
case CHIP_NAVI12:
adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 2;
+ adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
@@ -2710,18 +2710,20 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_commit(ring);
/* submit cs packet to copy state 0 to next available state */
- ring = &adev->gfx.gfx_ring[1];
- r = amdgpu_ring_alloc(ring, 2);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
- return r;
- }
-
- amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
- amdgpu_ring_write(ring, 0);
+ if (adev->gfx.num_gfx_rings > 1) {
+ /* maximum supported gfx ring is 2 */
+ ring = &adev->gfx.gfx_ring[1];
+ r = amdgpu_ring_alloc(ring, 2);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
- amdgpu_ring_commit(ring);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_commit(ring);
+ }
return 0;
}
@@ -2818,39 +2820,41 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Init gfx ring 1 for pipe 1 */
- mutex_lock(&adev->srbm_mutex);
- gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
- ring = &adev->gfx.gfx_ring[1];
- rb_bufsz = order_base_2(ring->ring_size / 8);
- tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
- tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
- WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
- /* Initialize the ring buffer's write pointers */
- ring->wptr = 0;
- WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
- /* Set the wb address whether it's enabled or not */
- rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
- WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
- WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
- CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
- wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
- upper_32_bits(wptr_gpu_addr));
-
- mdelay(1);
- WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
-
- rb_addr = ring->gpu_addr >> 8;
- WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
- WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
- WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
-
- gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
- mutex_unlock(&adev->srbm_mutex);
-
+ if (adev->gfx.num_gfx_rings > 1) {
+ mutex_lock(&adev->srbm_mutex);
+ gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
+ /* maximum supported gfx ring is 2 */
+ ring = &adev->gfx.gfx_ring[1];
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
+ WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+ /* Initialize the ring buffer's write pointers */
+ ring->wptr = 0;
+ WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
+ /* Set the wb address whether it's enabled or not */
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
+ CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+
+ mdelay(1);
+ WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+
+ rb_addr = ring->gpu_addr >> 8;
+ WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
+ WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
+
+ gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+ mutex_unlock(&adev->srbm_mutex);
+ }
/* Switch to pipe 0 */
mutex_lock(&adev->srbm_mutex);
gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
@@ -3513,6 +3517,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
/* reset ring buffer */
ring->wptr = 0;
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
} else {
amdgpu_ring_clear_ring(ring);
@@ -3966,7 +3971,8 @@ static int gfx_v10_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
+ adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
+
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
gfx_v10_0_set_kiq_pm4_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3afdbbd6aaad..889154a78c4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3663,6 +3663,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
/* reset ring buffer */
ring->wptr = 0;
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
} else {
amdgpu_ring_clear_ring(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 90216abf14a4..cc0c273a86f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1272,6 +1272,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
}
/**
+ * gmc_v9_0_restore_registers - restores regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This restores register values that were saved at suspend.
+ */
+static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_RAVEN)
+ WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+}
+
+/**
* gmc_v9_0_gart_enable - gart enable
*
* @adev: amdgpu_device pointer
@@ -1377,6 +1390,20 @@ static int gmc_v9_0_hw_init(void *handle)
}
/**
+ * gmc_v9_0_save_registers - saves regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This saves potential register values that should be
+ * restored upon resume
+ */
+static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_RAVEN)
+ adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
+}
+
+/**
* gmc_v9_0_gart_disable - gart disable
*
* @adev: amdgpu_device pointer
@@ -1412,9 +1439,16 @@ static int gmc_v9_0_hw_fini(void *handle)
static int gmc_v9_0_suspend(void *handle)
{
+ int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return gmc_v9_0_hw_fini(adev);
+ r = gmc_v9_0_hw_fini(adev);
+ if (r)
+ return r;
+
+ gmc_v9_0_save_registers(adev);
+
+ return 0;
}
static int gmc_v9_0_resume(void *handle)
@@ -1422,6 +1456,7 @@ static int gmc_v9_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v9_0_restore_registers(adev);
r = gmc_v9_0_hw_init(adev);
if (r)
return r;
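
The gmc_v9_0 hunks wrap hw_fini()/hw_init() in a save-then-restore pair for a register
the VBIOS does not reprogram on resume. The ordering in isolation, with hypothetical
stand-ins for the register accessors:

#include <stdint.h>
#include <stdbool.h>

static uint32_t mmio_reg;		/* stand-in for the MMIO register */
static uint32_t reg_read(void)    { return mmio_reg; }
static void reg_write(uint32_t v) { mmio_reg = v; }

struct dev_ctx {
	bool needs_workaround;	/* e.g. asic_type == CHIP_RAVEN */
	uint32_t reg_shadow;	/* the patch's adev->gmc.sdpif_register */
};

/* Suspend: quiesce the hardware first, then snapshot the register. */
int dev_suspend(struct dev_ctx *c, int (*hw_fini)(void))
{
	int r = hw_fini();

	if (r)
		return r;
	if (c->needs_workaround)
		c->reg_shadow = reg_read();
	return 0;
}

/* Resume: restore the snapshot before bringing the hardware back up. */
int dev_resume(struct dev_ctx *c, int (*hw_init)(void))
{
	if (c->needs_workaround)
		reg_write(c->reg_shadow);
	return hw_init();
}
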
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 2b488dfb2f21..d8945c31b622 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -89,6 +89,13 @@
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
+
+/* for Vega20/Arcturus register offset change */
+#define mmROM_INDEX_VG20 0x00e4
+#define mmROM_INDEX_VG20_BASE_IDX 0
+#define mmROM_DATA_VG20 0x00e5
+#define mmROM_DATA_VG20_BASE_IDX 0
+
/*
* Indirect registers accessor
*/
@@ -309,6 +316,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
{
u32 *dw_ptr;
u32 i, length_dw;
+ uint32_t rom_index_offset;
+ uint32_t rom_data_offset;
if (bios == NULL)
return false;
@@ -321,11 +330,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
+ rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
+ break;
+ default:
+ rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
+ rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
+ break;
+ }
+
/* set rom index to 0 */
- WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+ WREG32(rom_index_offset, 0);
/* read out the rom data */
for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+ dw_ptr[i] = RREG32(rom_data_offset);
return true;
}
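
The ROM read path now picks its register pair per ASIC rather than hard-coding one
offset. The dispatch shape in isolation; the VG20 offsets are the ones from the patch,
while the default pair below is an illustrative placeholder (real code derives both
through SOC15_REG_OFFSET()):

#include <stdint.h>

enum asic_type { CHIP_VEGA10, CHIP_VEGA20, CHIP_ARCTURUS, CHIP_NAVI10 };

struct rom_regs {
	uint32_t index;
	uint32_t data;
};

/* Vega20/Arcturus moved ROM_INDEX/ROM_DATA; everyone else keeps the
 * original pair. */
struct rom_regs rom_regs_for(enum asic_type t)
{
	switch (t) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		return (struct rom_regs){ .index = 0x00e4, .data = 0x00e5 };
	default:
		/* illustrative placeholder offsets */
		return (struct rom_regs){ .index = 0x0028, .data = 0x0029 };
	}
}
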
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e8f66fbf399e..e997251a8b57 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1422,6 +1422,73 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
drm_kms_helper_hotplug_event(dev);
}
+static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return 0;
+
+ /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
+ * on the Windows driver dc implementation.
+ * For Navi1x, clock settings of dcn watermarks are fixed. The settings
+ * should be passed to smu during boot up and resume from s3.
+ * boot up: dc calculate dcn watermark clock settings within dc_create,
+ * dcn20_resource_construct
+ * then call pplib functions below to pass the settings to smu:
+ * smu_set_watermarks_for_clock_ranges
+ * smu_set_watermarks_table
+ * navi10_set_watermarks_table
+ * smu_write_watermarks_table
+ *
+ * For Renoir, clock settings of dcn watermarks are also fixed values.
+ * dc has implemented a different flow for the Windows driver:
+ * dc_hardware_init / dc_set_power_state
+ * dcn10_init_hw
+ * notify_wm_ranges
+ * set_wm_ranges
+ * -- Linux
+ * smu_set_watermarks_for_clock_ranges
+ * renoir_set_watermarks_table
+ * smu_write_watermarks_table
+ *
+ * For Linux,
+ * dc_hardware_init -> amdgpu_dm_init
+ * dc_set_power_state --> dm_resume
+ *
+ * Therefore, this function applies to navi10/12/14 but not Renoir.
+ */
+ switch(adev->asic_type) {
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ break;
+ default:
+ return 0;
+ }
+
+ mutex_lock(&smu->mutex);
+
+ /* pass data to smu controller */
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_write_watermarks_table(smu);
+
+ if (ret) {
+ mutex_unlock(&smu->mutex);
+ DRM_ERROR("Failed to update WMTABLE!\n");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
/**
* dm_hw_init() - Initialize DC device
* @handle: The base driver device containing the amdgpu_dm device.
@@ -1700,6 +1767,8 @@ static int dm_resume(void *handle)
amdgpu_dm_irq_resume_late(adev);
+ amdgpu_dm_smu_write_watermarks_table(adev);
+
return 0;
}
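
The dm_resume() hook above funnels into a two-flag handshake on watermarks_bitmap:
EXIST is set once DC has computed a table, LOADED once the SMU accepted it, and the
upload happens only in the EXIST-but-not-LOADED window so resume re-uploads exactly
once. A compact sketch of that window (flag values illustrative):

#define WATERMARKS_EXIST	(1U << 0)
#define WATERMARKS_LOADED	(1U << 1)

/* Write the table only when one exists and has not yet been loaded;
 * mark it loaded on success so repeated calls become no-ops. */
int maybe_write_watermarks(unsigned int *bitmap, int (*write_table)(void))
{
	if ((*bitmap & WATERMARKS_EXIST) && !(*bitmap & WATERMARKS_LOADED)) {
		int ret = write_table();

		if (ret)
			return ret;
		*bitmap |= WATERMARKS_LOADED;
	}
	return 0;
}
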
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 5672f7765919..da73161043d5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -451,6 +451,7 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector->dc_sink);
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
+ aconnector->dc_link->cur_link_settings.lane_count = 0;
}
drm_connector_unregister(connector);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index f36a0d8cedfe..446ba0a7a4b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -840,8 +840,8 @@ static void hubbub1_det_request_size(
hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
- swath_bytes_horz_wc = height * blk256_height * bpe;
- swath_bytes_vert_wc = width * blk256_width * bpe;
+ swath_bytes_horz_wc = width * blk256_height * bpe;
+ swath_bytes_vert_wc = height * blk256_width * bpe;
*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
false : /* full 256B request */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 85f90f3e24cb..e310d67c399a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -335,6 +335,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
.use_urgent_burst_bw = 0
};
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dcfclk_mhz = 560.0,
+ .fabricclk_mhz = 560.0,
+ .dispclk_mhz = 513.0,
+ .dppclk_mhz = 513.0,
+ .phyclk_mhz = 540.0,
+ .socclk_mhz = 560.0,
+ .dscclk_mhz = 171.0,
+ .dram_speed_mts = 8960.0,
+ },
+ {
+ .state = 1,
+ .dcfclk_mhz = 694.0,
+ .fabricclk_mhz = 694.0,
+ .dispclk_mhz = 642.0,
+ .dppclk_mhz = 642.0,
+ .phyclk_mhz = 600.0,
+ .socclk_mhz = 694.0,
+ .dscclk_mhz = 214.0,
+ .dram_speed_mts = 11104.0,
+ },
+ {
+ .state = 2,
+ .dcfclk_mhz = 875.0,
+ .fabricclk_mhz = 875.0,
+ .dispclk_mhz = 734.0,
+ .dppclk_mhz = 734.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 875.0,
+ .dscclk_mhz = 245.0,
+ .dram_speed_mts = 14000.0,
+ },
+ {
+ .state = 3,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 1000.0,
+ .dispclk_mhz = 1100.0,
+ .dppclk_mhz = 1100.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1000.0,
+ .dscclk_mhz = 367.0,
+ .dram_speed_mts = 16000.0,
+ },
+ {
+ .state = 4,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 16000.0,
+ },
+ /*Extra state, no dispclk ramping*/
+ {
+ .state = 5,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 16000.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 8.6,
+ .sr_enter_plus_exit_time_us = 10.9,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 40.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 40.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 2,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 131,
+ .urgent_out_of_order_return_per_channel_bytes = 256,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 8,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 404.0,
+ .dummy_pstate_latency_us = 5.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3850,
+ .xfc_bus_transport_time_us = 20,
+ .xfc_xbuf_latency_tolerance_us = 4,
+ .use_urgent_burst_bw = 0
+};
+
struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -3291,6 +3402,9 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
uint32_t hw_internal_rev)
{
+ if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+ return &dcn2_0_nv14_soc;
+
if (ASICREV_IS_NAVI12_P(hw_internal_rev))
return &dcn2_0_nv12_soc;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
index b6f74bf4af02..27bb8c1ab858 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
@@ -7376,6 +7376,8 @@
#define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e
#define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
// addressBlock: dce_dc_fmt4_dispdec
// base address: 0x2000
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 99ad4ddbe12f..96e81c7bc266 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -222,7 +222,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
{
int ret = 0;
- if (min <= 0 && max <= 0)
+ if (min < 0 && max < 0)
return -EINVAL;
if (!smu_clk_dpm_is_enabled(smu, clk_type))
@@ -2006,8 +2006,11 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
smu_set_watermarks_table(smu, table, clock_ranges);
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
- smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+
+ if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
}
mutex_unlock(&smu->mutex);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 0d73a49166af..aed4d6e60907 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -1063,15 +1063,6 @@ static int navi10_display_config_changed(struct smu_context *smu)
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
- ret = smu_write_watermarks_table(smu);
- if (ret)
- return ret;
-
- smu->watermarks_bitmap |= WATERMARKS_LOADED;
- }
-
- if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
@@ -1493,6 +1484,7 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
*clock_ranges)
{
int i;
+ int ret = 0;
Watermarks_t *table = watermarks;
if (!table || !clock_ranges)
@@ -1544,6 +1536,18 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
+ /* pass data to smu controller */
+ if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_write_watermarks_table(smu);
+ if (ret) {
+ pr_err("Failed to update WMTABLE!");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 861e6410363b..3ad0f4aa3aa3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -111,8 +111,8 @@ static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, CLOCK_GFXCLK),
CLK_MAP(SCLK, CLOCK_GFXCLK),
CLK_MAP(SOCCLK, CLOCK_SOCCLK),
- CLK_MAP(UCLK, CLOCK_UMCCLK),
- CLK_MAP(MCLK, CLOCK_UMCCLK),
+ CLK_MAP(UCLK, CLOCK_FCLK),
+ CLK_MAP(MCLK, CLOCK_FCLK),
};
static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@ -280,7 +280,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
break;
case SMU_MCLK:
count = NUM_MEMCLK_DPM_LEVELS;
- cur_value = metrics.ClockFrequency[CLOCK_UMCCLK];
+ cur_value = metrics.ClockFrequency[CLOCK_FCLK];
break;
case SMU_DCEFCLK:
count = NUM_DCFCLK_DPM_LEVELS;
@@ -806,9 +806,10 @@ static int renoir_set_watermarks_table(
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
/* pass data to smu controller */
- if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
ret = smu_write_watermarks_table(smu);
if (ret) {
pr_err("Failed to update WMTABLE!");
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index b06c057a9002..c9e5ce135fd4 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -978,8 +978,12 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
int ret = 0;
- max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
+ if (!smu->smu_table.max_sustainable_clocks)
+ max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
GFP_KERNEL);
+ else
+ max_sustainable_clocks = smu->smu_table.max_sustainable_clocks;
+
smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
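
smu_v11_0_init_max_sustainable_clocks() runs again on every resume, so the
unconditional kzalloc() leaked one table per suspend cycle; the fix reuses the buffer
already hanging off smu_table. The allocate-once shape in isolation (calloc standing
in for kzalloc):

#include <stdlib.h>

struct clocks { unsigned int uclock; };

struct table { struct clocks *max_sustainable_clocks; };

/* Allocate on first use only; later calls return the existing buffer
 * instead of leaking a fresh allocation. */
struct clocks *get_clocks(struct table *t)
{
	if (!t->max_sustainable_clocks)
		t->max_sustainable_clocks = calloc(1, sizeof(struct clocks));
	return t->max_sustainable_clocks;
}
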
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 870e6db2907e..518e6597bf2d 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -458,9 +458,6 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
{
int ret = 0;
- if (max < min)
- return -EINVAL;
-
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 56f55c53abfd..2dfa2fd2a23b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -210,8 +210,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
if (err)
return err;
- dpcd[0] = drm_dp_max_link_rate(anx6345->dpcd);
- dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
+ dpcd[0] = dp_bw;
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
if (err)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index cce0b1bba591..ed0fea2ac322 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1935,7 +1935,7 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
-static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
{
switch (pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
@@ -1965,13 +1965,13 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
/* Teardown the old pdt, if there is one */
if (port->pdt != DP_PEER_DEVICE_NONE) {
- if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
/*
* If the new PDT would also have an i2c bus,
* don't bother with reregistering it
*/
if (new_pdt != DP_PEER_DEVICE_NONE &&
- drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+ drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
port->pdt = new_pdt;
port->mcs = new_mcs;
return 0;
@@ -1991,7 +1991,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
port->mcs = new_mcs;
if (port->pdt != DP_PEER_DEVICE_NONE) {
- if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
/* add i2c over sideband */
ret = drm_dp_mst_register_i2c_bus(&port->aux);
} else {
@@ -2172,7 +2172,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
}
if (port->pdt != DP_PEER_DEVICE_NONE &&
- drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
@@ -2302,14 +2302,18 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
mutex_unlock(&mgr->lock);
}
- if (old_ddps != port->ddps) {
- if (port->ddps) {
- if (!port->input) {
- drm_dp_send_enum_path_resources(mgr, mstb,
- port);
- }
+ /*
+ * Reprobe PBN caps both on hotplug and when re-probing the link
+ * for our parent mstb
+ */
+ if (old_ddps != port->ddps || !created) {
+ if (port->ddps && !port->input) {
+ ret = drm_dp_send_enum_path_resources(mgr, mstb,
+ port);
+ if (ret == 1)
+ changed = true;
} else {
- port->available_pbn = 0;
+ port->full_pbn = 0;
}
}
@@ -2401,11 +2405,10 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
port->ddps = conn_stat->displayport_device_plug_status;
if (old_ddps != port->ddps) {
- if (port->ddps) {
- dowork = true;
- } else {
- port->available_pbn = 0;
- }
+ if (port->ddps && !port->input)
+ drm_dp_send_enum_path_resources(mgr, mstb, port);
+ else
+ port->full_pbn = 0;
}
new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
@@ -2556,13 +2559,6 @@ static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mg
if (port->input || !port->ddps)
continue;
- if (!port->available_pbn) {
- drm_modeset_lock(&mgr->base.lock, NULL);
- drm_dp_send_enum_path_resources(mgr, mstb, port);
- drm_modeset_unlock(&mgr->base.lock);
- changed = true;
- }
-
if (port->mstb)
mstb_child = drm_dp_mst_topology_get_mstb_validated(
mgr, port->mstb);
@@ -2990,6 +2986,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
+ ret = 0;
path_res = &txmsg->reply.u.path_resources;
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
@@ -3002,14 +2999,22 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
path_res->port_number,
path_res->full_payload_bw_number,
path_res->avail_payload_bw_number);
- port->available_pbn =
- path_res->avail_payload_bw_number;
+
+ /*
+ * If something changed, make sure we send a
+ * hotplug event
+ */
+ if (port->full_pbn != path_res->full_payload_bw_number ||
+ port->fec_capable != path_res->fec_capable)
+ ret = 1;
+
+ port->full_pbn = path_res->full_payload_bw_number;
port->fec_capable = path_res->fec_capable;
}
}
kfree(txmsg);
- return 0;
+ return ret;
}
static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
@@ -3596,13 +3601,9 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
/* The link address will need to be re-sent on resume */
mstb->link_address_sent = false;
- list_for_each_entry(port, &mstb->ports, next) {
- /* The PBN for each port will also need to be re-probed */
- port->available_pbn = 0;
-
+ list_for_each_entry(port, &mstb->ports, next)
if (port->mstb)
drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
- }
}
/**
@@ -4829,41 +4830,102 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
return false;
}
-static inline
-int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
- struct drm_dp_mst_topology_state *mst_state)
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+ struct drm_dp_mst_topology_state *state);
+
+static int
+drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_topology_state *state)
{
- struct drm_dp_mst_port *port;
struct drm_dp_vcpi_allocation *vcpi;
- int pbn_limit = 0, pbn_used = 0;
+ struct drm_dp_mst_port *port;
+ int pbn_used = 0, ret;
+ bool found = false;
- list_for_each_entry(port, &branch->ports, next) {
- if (port->mstb)
- if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
- return -ENOSPC;
+ /* Check that we have at least one port in our state that's downstream
+ * of this branch, otherwise we can skip this branch
+ */
+ list_for_each_entry(vcpi, &state->vcpis, next) {
+ if (!vcpi->pbn ||
+ !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
+ continue;
- if (port->available_pbn > 0)
- pbn_limit = port->available_pbn;
+ found = true;
+ break;
}
- DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
- branch, pbn_limit);
+ if (!found)
+ return 0;
- list_for_each_entry(vcpi, &mst_state->vcpis, next) {
- if (!vcpi->pbn)
- continue;
+ if (mstb->port_parent)
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
+ mstb->port_parent->parent, mstb->port_parent,
+ mstb);
+ else
+ DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
+ mstb);
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
+ if (ret < 0)
+ return ret;
- if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
- pbn_used += vcpi->pbn;
+ pbn_used += ret;
}
- DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
- branch, pbn_used);
- if (pbn_used > pbn_limit) {
- DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
- branch);
+ return pbn_used;
+}
+
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+ struct drm_dp_mst_topology_state *state)
+{
+ struct drm_dp_vcpi_allocation *vcpi;
+ int pbn_used = 0;
+
+ if (port->pdt == DP_PEER_DEVICE_NONE)
+ return 0;
+
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
+ bool found = false;
+
+ list_for_each_entry(vcpi, &state->vcpis, next) {
+ if (vcpi->port != port)
+ continue;
+ if (!vcpi->pbn)
+ return 0;
+
+ found = true;
+ break;
+ }
+ if (!found)
+ return 0;
+
+ /* This should never happen, as it means we tried to
+ * set a mode before querying the full_pbn
+ */
+ if (WARN_ON(!port->full_pbn))
+ return -EINVAL;
+
+ pbn_used = vcpi->pbn;
+ } else {
+ pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
+ state);
+ if (pbn_used <= 0)
+ return pbn_used;
+ }
+
+ if (pbn_used > port->full_pbn) {
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
+ port->parent, port, pbn_used,
+ port->full_pbn);
return -ENOSPC;
}
- return 0;
+
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
+ port->parent, port, pbn_used, port->full_pbn);
+
+ return pbn_used;
}
static inline int
@@ -5061,9 +5123,15 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
if (ret)
break;
- ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
- if (ret)
+
+ mutex_lock(&mgr->lock);
+ ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
+ mst_state);
+ mutex_unlock(&mgr->lock);
+ if (ret < 0)
break;
+ else
+ ret = 0;
}
return ret;
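
The reworked check above is a mutual recursion over the topology: a branch sums its
ports, an end-device port contributes its VCPI PBN, and each port's total is compared
against full_pbn on the way back up. A toy single-function version of the same walk,
with -1 standing in for -ENOSPC and the node layout purely illustrative:

#include <stddef.h>

/* Toy topology node: an end device carries its own PBN demand, a branch
 * aggregates its children; full_pbn caps what may pass through a port. */
struct node {
	int full_pbn;		/* 0 on the root (no upstream limit) */
	int vcpi_pbn;		/* demand, end devices only */
	struct node *children;	/* NULL for end devices */
	size_t n_children;
};

/* Returns PBN used under this node, or -1 if any port exceeds its cap. */
int check_bw(const struct node *n)
{
	int used = 0;

	if (!n->children) {
		used = n->vcpi_pbn;
	} else {
		for (size_t i = 0; i < n->n_children; i++) {
			int ret = check_bw(&n->children[i]);

			if (ret < 0)
				return ret;
			used += ret;
		}
	}

	if (n->full_pbn && used > n->full_pbn)
		return -1;	/* -ENOSPC in the patch */
	return used;
}
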
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index a421a2eed48a..df31e5782eed 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -254,11 +254,16 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
if (ret)
goto err_zero_use;
- if (obj->import_attach)
+ if (obj->import_attach) {
shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
- else
+ } else {
+ pgprot_t prot = PAGE_KERNEL;
+
+ if (!shmem->map_cached)
+ prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ VM_MAP, prot);
+ }
if (!shmem->vaddr) {
DRM_DEBUG_KMS("Failed to vmap pages\n");
@@ -540,8 +545,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
}
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ if (!shmem->map_cached)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &drm_gem_shmem_vm_ops;
return 0;
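
With the shmem helper change, the vmap/mmap protection depends on whether the object
asked for a cached CPU mapping. A sketch of the selection with stand-in pgprot helpers
(the real pgprot_t and pgprot_writecombine() come from the kernel's pgtable headers):

#include <stdbool.h>

typedef unsigned long pgprot_t;
#define PAGE_KERNEL		0x1UL
#define PROT_WRITECOMBINE	0x2UL

pgprot_t pgprot_writecombine(pgprot_t p)
{
	return p | PROT_WRITECOMBINE;
}

/* Mirrors the patch: only uncached (default) buffers get write-combine;
 * objects that asked for a cached CPU mapping keep PAGE_KERNEL. */
pgprot_t shmem_vmap_prot(bool map_cached)
{
	pgprot_t prot = PAGE_KERNEL;

	if (!map_cached)
		prot = pgprot_writecombine(prot);
	return prot;
}
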
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 8428ae12dfa5..1f79bc2a881e 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = {
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
@@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
decon_clear_channels(ctx->crtc);
- return exynos_drm_register_dma(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}
static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
decon_atomic_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}
static const struct component_ops decon_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index ff59c641fa80..1eed3327999f 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -40,6 +40,7 @@
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
@@ -127,13 +128,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
decon_clear_channels(ctx->crtc);
- return exynos_drm_register_dma(drm_dev, ctx->dev);
+ return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
}
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}
static u32 decon_calc_clkdiv(struct decon_context *ctx,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 9ebc02768847..619f81435c1b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev)
* mapping.
*/
static int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
+ struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret;
@@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
return ret;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
- if (to_dma_iommu_mapping(subdrv_dev))
+ /*
+ * Keep the original DMA mapping of the sub-device and
+ * restore it on Exynos DRM detach, otherwise the DMA
+ * framework considers it as IOMMU-less during the next
+ * probe (in case of deferred probe or modular build)
+ */
+ *dma_priv = to_dma_iommu_mapping(subdrv_dev);
+ if (*dma_priv)
arm_iommu_detach_device(subdrv_dev);
ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
@@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
* mapping
*/
static void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
+ struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
arm_iommu_detach_device(subdrv_dev);
- else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ arm_iommu_attach_device(subdrv_dev, *dma_priv);
+ } else if (IS_ENABLED(CONFIG_IOMMU_DMA))
iommu_detach_device(priv->mapping, subdrv_dev);
clear_dma_max_seg_size(subdrv_dev);
}
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv)
{
struct exynos_drm_private *priv = drm->dev_private;
@@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
priv->mapping = mapping;
}
- return drm_iommu_attach_device(drm, dev);
+ return drm_iommu_attach_device(drm, dev, dma_priv);
}
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv)
{
if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
- drm_iommu_detach_device(drm, dev);
+ drm_iommu_detach_device(drm, dev, dma_priv);
}
void exynos_drm_cleanup_dma(struct drm_device *drm)
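Editor's note: every Exynos sub-driver in this series threads a per-context void *dma_priv cookie through register/unregister, and the comment in drm_iommu_attach_device() gives the reason: on ARM, replacing a device's default IOMMU mapping without remembering it makes the DMA framework treat the device as IOMMU-less on a later probe. The round-trip in isolation (exynos_mapping stands in for priv->mapping; the function names are illustrative):

	static int attach(struct device *dev, void **cookie)
	{
		*cookie = to_dma_iommu_mapping(dev);  /* save the default mapping */
		if (*cookie)
			arm_iommu_detach_device(dev);
		return arm_iommu_attach_device(dev, exynos_mapping);
	}

	static void detach(struct device *dev, void **cookie)
	{
		arm_iommu_detach_device(dev);
		if (*cookie)                          /* put the default back */
			arm_iommu_attach_device(dev, *cookie);
	}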
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d4d21d8cfb90..6ae9056e7a18 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
return priv->mapping ? true : false;
}
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv);
void exynos_drm_cleanup_dma(struct drm_device *drm);
#ifdef CONFIG_DRM_EXYNOS_DPI
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 33628d85edad..a85365c56d4d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1773,8 +1773,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
dsi->supplies);
if (ret) {
- dev_info(dev, "failed to get regulators: %d\n", ret);
- return -EPROBE_DEFER;
+ if (ret != -EPROBE_DEFER)
+ dev_info(dev, "failed to get regulators: %d\n", ret);
+ return ret;
}
dsi->clks = devm_kcalloc(dev,
@@ -1787,9 +1788,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(dsi->clks[i])) {
if (strcmp(clk_names[i], "sclk_mipi") == 0) {
- strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME);
- i--;
- continue;
+ dsi->clks[i] = devm_clk_get(dev,
+ OLD_SCLK_MIPI_CLK_NAME);
+ if (!IS_ERR(dsi->clks[i]))
+ continue;
}
dev_info(dev, "failed to get the clock: %s\n",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 8ea2e1d77802..29ab8be8604c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -97,6 +97,7 @@ struct fimc_scaler {
struct fimc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;
@@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
ctx->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}
static const struct component_ops fimc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 21aec38702fc..bb67cad8371f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
struct fimd_context {
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
@@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (is_drm_iommu_supported(drm_dev))
fimd_clear_channels(ctx->crtc);
- return exynos_drm_register_dma(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}
static void fimd_unbind(struct device *dev, struct device *master,
@@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
fimd_atomic_disable(ctx->crtc);
- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 2a3382d43bc9..fcee33a43aca 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -232,6 +232,7 @@ struct g2d_runqueue_node {
struct g2d_data {
struct device *dev;
+ void *dma_priv;
struct clk *gate_clk;
void __iomem *regs;
int irq;
@@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = exynos_drm_register_dma(drm_dev, dev);
+ ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);
@@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
priv->g2d_dev = NULL;
cancel_work_sync(&g2d->runqueue_work);
- exynos_drm_unregister_dma(g2d->drm_dev, dev);
+ exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
}
static const struct component_ops g2d_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 88b6fcaa20be..45e9aee8366a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -97,6 +97,7 @@ struct gsc_scaler {
struct gsc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;
@@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
ctx->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}
static const struct component_ops gsc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index b98482990d1a..dafa87b82052 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -56,6 +56,7 @@ struct rot_variant {
struct rot_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock;
@@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
rot->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &rot->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
+ exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv);
}
static const struct component_ops rotator_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 497973e9b2c5..93c43c8d914e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -39,6 +39,7 @@ struct scaler_data {
struct scaler_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock[SCALER_MAX_CLK];
@@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
scaler->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &scaler->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
+ exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev,
+ &scaler->dma_priv);
}
static const struct component_ops scaler_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 9ff921f43a93..f141916eade6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1805,18 +1805,10 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
- if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) {
+ if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV)
if (IS_ERR(hdata->reg_hdmi_en))
return PTR_ERR(hdata->reg_hdmi_en);
- ret = regulator_enable(hdata->reg_hdmi_en);
- if (ret) {
- DRM_DEV_ERROR(dev,
- "failed to enable hdmi-en regulator\n");
- return ret;
- }
- }
-
return hdmi_bridge_init(hdata);
}
@@ -2023,6 +2015,15 @@ static int hdmi_probe(struct platform_device *pdev)
}
}
+ if (!IS_ERR(hdata->reg_hdmi_en)) {
+ ret = regulator_enable(hdata->reg_hdmi_en);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "failed to enable hdmi-en regulator\n");
+ goto err_hdmiphy;
+ }
+ }
+
pm_runtime_enable(dev);
audio_infoframe = &hdata->audio.infoframe;
@@ -2047,7 +2048,8 @@ err_unregister_audio:
err_rpm_disable:
pm_runtime_disable(dev);
-
+ if (!IS_ERR(hdata->reg_hdmi_en))
+ regulator_disable(hdata->reg_hdmi_en);
err_hdmiphy:
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
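Editor's note: the net effect of the hdmi hunks is that regulator_enable() moves out of hdmi_resources_init() and into hdmi_probe(), where a failure can unwind through the existing labels and a later probe failure now disables the regulator again. The general goto-ladder this follows, with hypothetical helpers (acquire in order, release in reverse):

	static int probe(void)
	{
		int ret = acquire_a();

		if (ret)
			return ret;
		ret = acquire_b();
		if (ret)
			goto err_a;
		ret = acquire_c();
		if (ret)
			goto err_b;
		return 0;
	err_b:
		release_b();
	err_a:
		release_a();
		return ret;
	}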
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 38ae9c32feef..21b726baedea 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -94,6 +94,7 @@ struct mixer_context {
struct platform_device *pdev;
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[MIXER_WIN_NR];
unsigned long flags;
@@ -894,12 +895,14 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
}
}
- return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
+ return exynos_drm_register_dma(drm_dev, mixer_ctx->dev,
+ &mixer_ctx->dma_priv);
}
static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
- exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
+ exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev,
+ &mixer_ctx->dma_priv);
}
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
index 0da860200410..e2ac09894a6d 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
@@ -83,7 +83,6 @@
#define VSIZE_OFST 20
#define LDI_INT_EN 0x741C
#define FRAME_END_INT_EN_OFST 1
-#define UNDERFLOW_INT_EN_OFST 2
#define LDI_CTRL 0x7420
#define BPP_OFST 3
#define DATA_GATE_EN BIT(2)
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 73cd28a6ea07..86000127d4ee 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -46,7 +46,6 @@ struct ade_hw_ctx {
struct clk *media_noc_clk;
struct clk *ade_pix_clk;
struct reset_control *reset;
- struct work_struct display_reset_wq;
bool power_on;
int irq;
@@ -136,7 +135,6 @@ static void ade_init(struct ade_hw_ctx *ctx)
*/
ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
- ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1);
}
static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -304,17 +302,6 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
MASK(1), 0);
}
-static void drm_underflow_wq(struct work_struct *work)
-{
- struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx,
- display_reset_wq);
- struct drm_device *drm_dev = ctx->crtc->dev;
- struct drm_atomic_state *state;
-
- state = drm_atomic_helper_suspend(drm_dev);
- drm_atomic_helper_resume(drm_dev, state);
-}
-
static irqreturn_t ade_irq_handler(int irq, void *data)
{
struct ade_hw_ctx *ctx = data;
@@ -331,12 +318,6 @@ static irqreturn_t ade_irq_handler(int irq, void *data)
MASK(1), 1);
drm_crtc_handle_vblank(crtc);
}
- if (status & BIT(UNDERFLOW_INT_EN_OFST)) {
- ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST,
- MASK(1), 1);
- DRM_ERROR("LDI underflow!");
- schedule_work(&ctx->display_reset_wq);
- }
return IRQ_HANDLED;
}
@@ -919,7 +900,6 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
if (ret)
return ERR_PTR(-EIO);
- INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq);
ctx->crtc = crtc;
return ctx;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b8c5f8934dbd..a1f2411aa21b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -294,7 +294,7 @@ extra-$(CONFIG_DRM_I915_WERROR) += \
$(shell cd $(srctree)/$(src) && find * -name '*.h')))
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
- cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@
+ cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 21561acfa3ac..46c40db992dd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4466,13 +4466,19 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
- u32 val;
+ u32 mask, val;
- val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
- MBUS_ABOX_BT_CREDIT_POOL2(16) |
- MBUS_ABOX_B_CREDIT(1) |
- MBUS_ABOX_BW_CREDIT(1);
+ mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
+ MBUS_ABOX_BT_CREDIT_POOL2_MASK |
+ MBUS_ABOX_B_CREDIT_MASK |
+ MBUS_ABOX_BW_CREDIT_MASK;
+ val = I915_READ(MBUS_ABOX_CTL);
+ val &= ~mask;
+ val |= MBUS_ABOX_BT_CREDIT_POOL1(16) |
+ MBUS_ABOX_BT_CREDIT_POOL2(16) |
+ MBUS_ABOX_B_CREDIT(1) |
+ MBUS_ABOX_BW_CREDIT(1);
I915_WRITE(MBUS_ABOX_CTL, val);
}
@@ -4968,8 +4974,21 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
} else {
+ u32 val;
+
I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
+
+ /* Wa_22010178259:tgl */
+ val = I915_READ(BW_BUDDY1_CTL);
+ val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
+ val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
+ I915_WRITE(BW_BUDDY1_CTL, val);
+
+ val = I915_READ(BW_BUDDY2_CTL);
+ val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
+ val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
+ I915_WRITE(BW_BUDDY2_CTL, val);
}
}
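Editor's note: both intel_display_power.c hunks swap a blind register write for read-modify-write: clear exactly the field's mask, then OR in the new value via REG_FIELD_PREP so bits set elsewhere (e.g. by firmware) survive. The arithmetic in isolation (userspace model; the mask mirrors REG_GENMASK(21, 16)):

	#include <assert.h>
	#include <stdint.h>

	#define TIMER_SHIFT 16
	#define TIMER_MASK  (0x3Fu << TIMER_SHIFT)    /* bits 21:16 */

	int main(void)
	{
		uint32_t reg = 0x80250000u;           /* neighbours pre-set */

		reg &= ~TIMER_MASK;                   /* clear the field    */
		reg |= (0x8u << TIMER_SHIFT) & TIMER_MASK;
		assert((reg & TIMER_MASK) >> TIMER_SHIFT == 0x8);
		assert(reg & 0x80000000u);            /* neighbours survive */
		return 0;
	}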
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 89c9cf5f38d2..83025052c965 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -852,10 +852,12 @@ void intel_psr_enable(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!crtc_state->has_psr)
+ if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
return;
- if (WARN_ON(!CAN_PSR(dev_priv)))
+ dev_priv->psr.force_mode_changed = false;
+
+ if (!crtc_state->has_psr)
return;
WARN_ON(dev_priv->drrs.dp);
@@ -1009,6 +1011,8 @@ void intel_psr_update(struct intel_dp *intel_dp,
if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
return;
+ dev_priv->psr.force_mode_changed = false;
+
mutex_lock(&dev_priv->psr.lock);
enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
@@ -1534,7 +1538,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
struct drm_crtc_state *crtc_state;
if (!CAN_PSR(dev_priv) || !new_state->crtc ||
- dev_priv->psr.initially_probed)
+ !dev_priv->psr.force_mode_changed)
return;
intel_connector = to_intel_connector(connector);
@@ -1545,5 +1549,18 @@ void intel_psr_atomic_check(struct drm_connector *connector,
crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
new_state->crtc);
crtc_state->mode_changed = true;
- dev_priv->psr.initially_probed = true;
+}
+
+void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!intel_dp)
+ return;
+
+ dev_priv = dp_to_i915(intel_dp);
+ if (!CAN_PSR(dev_priv) || intel_dp != dev_priv->psr.dp)
+ return;
+
+ dev_priv->psr.force_mode_changed = true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index c58a1d438808..274fc6bb6221 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -40,5 +40,6 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
void intel_psr_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
+void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 60c984e10c4a..7643a30ba4cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -423,7 +423,8 @@ eb_validate_vma(struct i915_execbuffer *eb,
if (unlikely(entry->flags & eb->invalid_flags))
return -EINVAL;
- if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
+ if (unlikely(entry->alignment &&
+ !is_power_of_2_u64(entry->alignment)))
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 35985218bd85..5da9f9e534b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -225,6 +225,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+ cond_resched();
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index f7e4b39c734f..59b387ade49c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -256,8 +256,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE);
+ I915_SHRINK_UNBOUND);
}
return freed;
@@ -336,7 +335,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
freed_pages = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
- I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index ef7c74cff28a..43912e9b683d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -570,7 +570,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
- return PTR_ERR(obj);
+ return false;
mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 8a5054f21bf8..24c99d0838af 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -147,24 +147,32 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
fence = i915_active_fence_get(&tl->last_request);
if (fence) {
+ mutex_unlock(&tl->mutex);
+
timeout = dma_fence_wait_timeout(fence,
interruptible,
timeout);
dma_fence_put(fence);
+
+ /* Retirement is best effort */
+ if (!mutex_trylock(&tl->mutex)) {
+ active_count++;
+ goto out_active;
+ }
}
}
if (!retire_requests(tl) || flush_submission(gt))
active_count++;
+ mutex_unlock(&tl->mutex);
- spin_lock(&timelines->lock);
+out_active: spin_lock(&timelines->lock);
- /* Resume iteration after dropping lock */
+ /* Resume list iteration after reacquiring spinlock */
list_safe_reset_next(tl, tn, link);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
- mutex_unlock(&tl->mutex);
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
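Editor's note: the retirement hunk above encodes one rule: never sleep in dma_fence_wait_timeout() while holding tl->mutex, and on the way back only trylock, counting the timeline as still active when the lock is contended. The same shape modelled with pthreads (wait() stands in for the fence wait):

	#include <pthread.h>
	#include <stdbool.h>

	/* Called with *lock held; returns false if retirement was skipped. */
	static bool wait_then_retire(pthread_mutex_t *lock, void (*wait)(void))
	{
		pthread_mutex_unlock(lock);       /* drop before sleeping       */
		wait();
		if (pthread_mutex_trylock(lock))  /* non-zero means contended   */
			return false;             /* best effort; try next pass */
		/* ... retire completed work under the lock ... */
		pthread_mutex_unlock(lock);
		return true;
	}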
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index fe8a59aaa629..940e7f7df69a 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1679,11 +1679,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
if (!intel_engine_has_timeslices(engine))
return false;
- if (list_is_last(&rq->sched.link, &engine->active.requests))
- return false;
-
- hint = max(rq_prio(list_next_entry(rq, sched.link)),
- engine->execlists.queue_priority_hint);
+ hint = engine->execlists.queue_priority_hint;
+ if (!list_is_last(&rq->sched.link, &engine->active.requests))
+ hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
return hint >= effective_prio(rq);
}
@@ -1725,6 +1723,18 @@ static void set_timeslice(struct intel_engine_cs *engine)
set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
}
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ execlists->switch_priority_hint = execlists->queue_priority_hint;
+
+ if (timer_pending(&execlists->timer))
+ return;
+
+ set_timer_ms(&execlists->timer, timeslice(engine));
+}
+
static void record_preemption(struct intel_engine_execlists *execlists)
{
(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
@@ -1888,11 +1898,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* Even if ELSP[1] is occupied and not worthy
* of timeslices, our queue might be.
*/
- if (!execlists->timer.expires &&
- need_timeslice(engine, last))
- set_timer_ms(&execlists->timer,
- timeslice(engine));
-
+ start_timeslice(engine);
return;
}
}
@@ -1927,7 +1933,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.active.lock);
- return; /* leave this for another */
+ start_timeslice(engine);
+ return; /* leave this for another sibling */
}
ENGINE_TRACE(engine,
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 87716529cd2f..d8d9f1179c2b 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -192,11 +192,15 @@ static void cacheline_release(struct intel_timeline_cacheline *cl)
static void cacheline_free(struct intel_timeline_cacheline *cl)
{
+ if (!i915_active_acquire_if_busy(&cl->active)) {
+ __idle_cacheline_free(cl);
+ return;
+ }
+
GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
- if (i915_active_is_idle(&cl->active))
- __idle_cacheline_free(cl);
+ i915_active_release(&cl->active);
}
int intel_timeline_init(struct intel_timeline *timeline,
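Editor's note: cacheline_free() used to free only if the tracker happened to be idle at that instant, racing against the final release. The new order, take a reference only while still busy and otherwise free synchronously, closes that window. A single-threaded model with stand-in refcounting (illustrative, not i915_active):

	#include <stdbool.h>
	#include <stdlib.h>

	struct cl { int active; bool free_pending; };

	static bool acquire_if_busy(struct cl *c)
	{
		if (!c->active)
			return false;
		c->active++;
		return true;
	}

	static void release(struct cl *c)
	{
		if (--c->active == 0 && c->free_pending)
			free(c);
	}

	static void cl_free(struct cl *c)
	{
		if (!acquire_if_busy(c)) {    /* already idle: free now    */
			free(c);
			return;
		}
		c->free_pending = true;       /* defer to the last release */
		release(c);
	}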
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 4e292d4bf7b9..173a7f2d109f 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -575,24 +575,19 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- u32 val;
-
/* Wa_1409142259:tgl */
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
- /* Wa_1604555607:tgl */
- val = intel_uncore_read(engine->uncore, FF_MODE2);
- val &= ~FF_MODE2_TDS_TIMER_MASK;
- val |= FF_MODE2_TDS_TIMER_128;
/*
- * FIXME: FF_MODE2 register is not readable till TGL B0. We can
- * enable verification of WA from the later steppings, which enables
- * the read of FF_MODE2.
+ * Wa_1604555607:gen12 and Wa_1608008084:gen12
+ * FF_MODE2 register will return the wrong value when read. The default
+ * value for this register is zero for all fields and there are no bit
+ * masks. So instead of doing a RMW we should just write the TDS timer
+ * value for Wa_1604555607.
*/
- wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val,
- IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 :
- FF_MODE2_TDS_TIMER_MASK);
+ wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128, 0);
}
static void
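Editor's note: the rewritten comment is the whole story: FF_MODE2 reads back wrong values on these steppings, so the old read-modify-write folded garbage into the bits it meant to preserve. Because the register's default is all zeroes and no other field is in use, the workaround can write the timer field outright. In miniature (read_reg/write_reg and the field names are hypothetical):

	uint32_t v = read_reg(FF_MODE2);                 /* garbage value    */
	write_reg(FF_MODE2, (v & ~TDS_MASK) | TDS_128);  /* smears garbage   */

	write_reg(FF_MODE2, TDS_128);                    /* safe: full write */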
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e1c313da6c00..a62bdf9be682 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -457,7 +457,8 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
/* TODO: add more platforms support */
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv)) {
if (connected) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDID_DETECTED;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 2477a1e5a166..ae139f0877ae 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -151,12 +151,12 @@ static void dmabuf_gem_object_free(struct kref *kref)
dmabuf_obj = container_of(pos,
struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
+ list_del(pos);
intel_gvt_hypervisor_put_vfio_device(vgpu);
idr_remove(&vgpu->object_idr,
dmabuf_obj->dmabuf_id);
kfree(dmabuf_obj->info);
kfree(dmabuf_obj);
- list_del(pos);
break;
}
}
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 867e7629025b..33569b910ed5 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -147,15 +147,14 @@ static void virt_vbt_generation(struct vbt *v)
/* there are features depending on version! */
v->header.version = 155;
v->header.header_size = sizeof(v->header);
- v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
+ v->header.vbt_size = sizeof(struct vbt);
v->header.bdb_offset = offsetof(struct vbt, bdb_header);
strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
v->bdb_header.version = 186; /* child_dev_size = 33 */
v->bdb_header.header_size = sizeof(v->bdb_header);
- v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
- - sizeof(struct bdb_header);
+ v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header);
/* general features */
v->general_features_header.id = BDB_GENERAL_FEATURES;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 85bd9bf4f6ee..345c2aa3b491 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -272,10 +272,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- mutex_lock(&vgpu->vgpu_lock);
-
WARN(vgpu->active, "vGPU is still active!\n");
+ /*
+ * Remove the vgpu from the idr first so the cleanup below can
+ * judge whether the service must be stopped once no vgpu is active.
+ */
+ mutex_lock(&gvt->lock);
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
+ mutex_unlock(&gvt->lock);
+
+ mutex_lock(&vgpu->vgpu_lock);
intel_gvt_debugfs_remove_vgpu(vgpu);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_submission(vgpu);
@@ -290,7 +297,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
mutex_unlock(&vgpu->vgpu_lock);
mutex_lock(&gvt->lock);
- idr_remove(&gvt->vgpu_idr, vgpu->id);
if (idr_is_empty(&gvt->vgpu_idr))
intel_gvt_clean_irq(gvt);
intel_gvt_update_vgpu_types(gvt);
@@ -560,9 +566,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
intel_vgpu_reset_mmio(vgpu, dmlr);
populate_pvinfo_page(vgpu);
- intel_vgpu_reset_display(vgpu);
if (dmlr) {
+ intel_vgpu_reset_display(vgpu);
intel_vgpu_reset_cfg_space(vgpu);
/* only reset the failsafe mode when dmlr reset */
vgpu->failsafe = false;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f7385abdd74b..8410330ce4f0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -56,6 +56,7 @@
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
+#include "display/intel_psr.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
@@ -330,6 +331,8 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915)
intel_init_ipc(i915);
+ intel_psr_set_force_mode_changed(i915->psr.dp);
+
return 0;
cleanup_gem:
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 077af22b8340..810e3ccd56ec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -505,7 +505,7 @@ struct i915_psr {
bool dc3co_enabled;
u32 dc3co_exit_delay;
struct delayed_work idle_work;
- bool initially_probed;
+ bool force_mode_changed;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 83f01401b8b5..f631f6d21127 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -437,7 +437,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
@@ -494,7 +494,7 @@ static const struct intel_device_info vlv_info = {
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0f556d80ba36..3b6b913bd27a 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1954,9 +1954,10 @@ out:
return i915_vma_get(oa_bo->vma);
}
-static int emit_oa_config(struct i915_perf_stream *stream,
- struct i915_oa_config *oa_config,
- struct intel_context *ce)
+static struct i915_request *
+emit_oa_config(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config,
+ struct intel_context *ce)
{
struct i915_request *rq;
struct i915_vma *vma;
@@ -1964,7 +1965,7 @@ static int emit_oa_config(struct i915_perf_stream *stream,
vma = get_oa_vma(stream, oa_config);
if (IS_ERR(vma))
- return PTR_ERR(vma);
+ return ERR_CAST(vma);
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
@@ -1989,13 +1990,17 @@ static int emit_oa_config(struct i915_perf_stream *stream,
err = rq->engine->emit_bb_start(rq,
vma->node.start, 0,
I915_DISPATCH_SECURE);
+ if (err)
+ goto err_add_request;
+
+ i915_request_get(rq);
err_add_request:
i915_request_add(rq);
err_vma_unpin:
i915_vma_unpin(vma);
err_vma_put:
i915_vma_put(vma);
- return err;
+ return err ? ERR_PTR(err) : rq;
}
static struct intel_context *oa_context(struct i915_perf_stream *stream)
@@ -2003,7 +2008,8 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
return stream->pinned_ctx ?: stream->engine->kernel_context;
}
-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+hsw_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
@@ -2406,7 +2412,8 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
}
-static int gen8_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+gen8_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2448,7 +2455,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
*/
ret = lrc_configure_all_contexts(stream, oa_config);
if (ret)
- return ret;
+ return ERR_PTR(ret);
return emit_oa_config(stream, oa_config, oa_context(stream));
}
@@ -2460,7 +2467,8 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
-static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+gen12_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2491,7 +2499,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
*/
ret = gen12_configure_all_contexts(stream, oa_config);
if (ret)
- return ret;
+ return ERR_PTR(ret);
/*
* For Gen12, performance counters are context
@@ -2501,7 +2509,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
if (stream->ctx) {
ret = gen12_configure_oar_context(stream, true);
if (ret)
- return ret;
+ return ERR_PTR(ret);
}
return emit_oa_config(stream, oa_config, oa_context(stream));
@@ -2696,6 +2704,20 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
.read = i915_oa_read,
};
+static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
+{
+ struct i915_request *rq;
+
+ rq = stream->perf->ops.enable_metric_set(stream);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(rq);
+
+ return 0;
+}
+
/**
* i915_oa_stream_init - validate combined props for OA stream and init
* @stream: An i915 perf stream
@@ -2829,7 +2851,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
stream->ops = &i915_oa_stream_ops;
perf->exclusive_stream = stream;
- ret = perf->ops.enable_metric_set(stream);
+ ret = i915_perf_stream_enable_sync(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
@@ -3147,7 +3169,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
return -EINVAL;
if (config != stream->oa_config) {
- int err;
+ struct i915_request *rq;
/*
* If OA is bound to a specific context, emit the
@@ -3158,11 +3180,13 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
* When set globally, we use a low priority kernel context,
* so it will effectively take effect when idle.
*/
- err = emit_oa_config(stream, config, oa_context(stream));
- if (err == 0)
+ rq = emit_oa_config(stream, config, oa_context(stream));
+ if (!IS_ERR(rq)) {
config = xchg(&stream->oa_config, config);
- else
- ret = err;
+ i915_request_put(rq);
+ } else {
+ ret = PTR_ERR(rq);
+ }
}
i915_oa_config_put(config);
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index 45e581455f5d..a0e22f00f6cf 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -339,7 +339,8 @@ struct i915_oa_ops {
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- int (*enable_metric_set)(struct i915_perf_stream *stream);
+ struct i915_request *
+ (*enable_metric_set)(struct i915_perf_stream *stream);
/**
* @disable_metric_set: Remove system constraints associated with using
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index ec0299490dd4..aa729d04abe2 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -822,11 +822,6 @@ static ssize_t i915_pmu_event_show(struct device *dev,
return sprintf(buf, "config=0x%lx\n", eattr->val);
}
-static struct attribute_group i915_pmu_events_attr_group = {
- .name = "events",
- /* Patch in attrs at runtime. */
-};
-
static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
struct device_attribute *attr,
@@ -846,13 +841,6 @@ static const struct attribute_group i915_pmu_cpumask_attr_group = {
.attrs = i915_cpumask_attrs,
};
-static const struct attribute_group *i915_pmu_attr_groups[] = {
- &i915_pmu_format_attr_group,
- &i915_pmu_events_attr_group,
- &i915_pmu_cpumask_attr_group,
- NULL
-};
-
#define __event(__config, __name, __unit) \
{ \
.config = (__config), \
@@ -1026,23 +1014,23 @@ err_alloc:
static void free_event_attributes(struct i915_pmu *pmu)
{
- struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
+ struct attribute **attr_iter = pmu->events_attr_group.attrs;
for (; *attr_iter; attr_iter++)
kfree((*attr_iter)->name);
- kfree(i915_pmu_events_attr_group.attrs);
+ kfree(pmu->events_attr_group.attrs);
kfree(pmu->i915_attr);
kfree(pmu->pmu_attr);
- i915_pmu_events_attr_group.attrs = NULL;
+ pmu->events_attr_group.attrs = NULL;
pmu->i915_attr = NULL;
pmu->pmu_attr = NULL;
}
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
GEM_BUG_ON(!pmu->base.event_init);
@@ -1055,7 +1043,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
unsigned int target;
GEM_BUG_ON(!pmu->base.event_init);
@@ -1072,8 +1060,6 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
}
-static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
-
static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
enum cpuhp_state slot;
@@ -1087,21 +1073,22 @@ static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
return ret;
slot = ret;
- ret = cpuhp_state_add_instance(slot, &pmu->node);
+ ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node);
if (ret) {
cpuhp_remove_multi_state(slot);
return ret;
}
- cpuhp_slot = slot;
+ pmu->cpuhp.slot = slot;
return 0;
}
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
- WARN_ON(cpuhp_slot == CPUHP_INVALID);
- WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
- cpuhp_remove_multi_state(cpuhp_slot);
+ WARN_ON(pmu->cpuhp.slot == CPUHP_INVALID);
+ WARN_ON(cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
+ cpuhp_remove_multi_state(pmu->cpuhp.slot);
+ pmu->cpuhp.slot = CPUHP_INVALID;
}
static bool is_igp(struct drm_i915_private *i915)
@@ -1118,6 +1105,13 @@ static bool is_igp(struct drm_i915_private *i915)
void i915_pmu_register(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
+ const struct attribute_group *attr_groups[] = {
+ &i915_pmu_format_attr_group,
+ &pmu->events_attr_group,
+ &i915_pmu_cpumask_attr_group,
+ NULL
+ };
+
int ret = -ENOMEM;
if (INTEL_GEN(i915) <= 2) {
@@ -1128,6 +1122,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
spin_lock_init(&pmu->lock);
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
+ pmu->cpuhp.slot = CPUHP_INVALID;
if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
@@ -1143,11 +1138,16 @@ void i915_pmu_register(struct drm_i915_private *i915)
if (!pmu->name)
goto err;
- i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
- if (!i915_pmu_events_attr_group.attrs)
+ pmu->events_attr_group.name = "events";
+ pmu->events_attr_group.attrs = create_event_attributes(pmu);
+ if (!pmu->events_attr_group.attrs)
goto err_name;
- pmu->base.attr_groups = i915_pmu_attr_groups;
+ pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
+ GFP_KERNEL);
+ if (!pmu->base.attr_groups)
+ goto err_attr;
+
pmu->base.task_ctx_nr = perf_invalid_context;
pmu->base.event_init = i915_pmu_event_init;
pmu->base.add = i915_pmu_event_add;
@@ -1159,7 +1159,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
ret = perf_pmu_register(&pmu->base, pmu->name, -1);
if (ret)
- goto err_attr;
+ goto err_groups;
ret = i915_pmu_register_cpuhp_state(pmu);
if (ret)
@@ -1169,6 +1169,8 @@ void i915_pmu_register(struct drm_i915_private *i915)
err_unreg:
perf_pmu_unregister(&pmu->base);
+err_groups:
+ kfree(pmu->base.attr_groups);
err_attr:
pmu->base.event_init = NULL;
free_event_attributes(pmu);
@@ -1194,6 +1196,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
perf_pmu_unregister(&pmu->base);
pmu->base.event_init = NULL;
+ kfree(pmu->base.attr_groups);
if (!is_igp(i915))
kfree(pmu->name);
free_event_attributes(pmu);
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 6c1647c5daf2..f1d6cad0d7d5 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -39,9 +39,12 @@ struct i915_pmu_sample {
struct i915_pmu {
/**
- * @node: List node for CPU hotplug handling.
+ * @cpuhp: Struct used for CPU hotplug handling.
*/
- struct hlist_node node;
+ struct {
+ struct hlist_node node;
+ enum cpuhp_state slot;
+ } cpuhp;
/**
* @base: PMU base.
*/
@@ -105,6 +108,10 @@ struct i915_pmu {
*/
ktime_t sleep_last;
/**
+ * @events_attr_group: Device events attribute group.
+ */
+ struct attribute_group events_attr_group;
+ /**
* @i915_attr: Memory block holding device attributes.
*/
void *i915_attr;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6cc55c103f67..3575fd30756b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7757,6 +7757,7 @@ enum {
#define BW_BUDDY1_CTL _MMIO(0x45140)
#define BW_BUDDY2_CTL _MMIO(0x45150)
#define BW_BUDDY_DISABLE REG_BIT(31)
+#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
#define BW_BUDDY1_PAGE_MASK _MMIO(0x45144)
#define BW_BUDDY2_PAGE_MASK _MMIO(0x45154)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f56b046a32de..a18b2a244706 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -275,7 +275,7 @@ bool i915_request_retire(struct i915_request *rq)
spin_unlock_irq(&rq->lock);
remove_from_client(rq);
- list_del(&rq->link);
+ list_del_rcu(&rq->link);
intel_context_exit(rq->context);
intel_context_unpin(rq->context);
@@ -527,19 +527,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
+static void irq_semaphore_cb(struct irq_work *wrk)
+{
+ struct i915_request *rq =
+ container_of(wrk, typeof(*rq), semaphore_work);
+
+ i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
+ i915_request_put(rq);
+}
+
static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
- struct i915_request *request =
- container_of(fence, typeof(*request), semaphore);
+ struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
switch (state) {
case FENCE_COMPLETE:
- i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
+ if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
+ i915_request_get(rq);
+ init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
+ irq_work_queue(&rq->semaphore_work);
+ }
break;
case FENCE_FREE:
- i915_request_put(request);
+ i915_request_put(rq);
break;
}
@@ -721,6 +733,8 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->infix = rq->ring->emit; /* end of header; start of user payload */
intel_context_mark_active(ce);
+ list_add_tail_rcu(&rq->link, &tl->requests);
+
return rq;
err_unwind:
@@ -774,16 +788,26 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
struct dma_fence *fence;
int err;
- GEM_BUG_ON(i915_request_timeline(rq) ==
- rcu_access_pointer(signal->timeline));
+ if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
+ return 0;
+
+ if (i915_request_started(signal))
+ return 0;
fence = NULL;
rcu_read_lock();
spin_lock_irq(&signal->lock);
- if (!i915_request_started(signal) &&
- !list_is_first(&signal->link,
- &rcu_dereference(signal->timeline)->requests)) {
- struct i915_request *prev = list_prev_entry(signal, link);
+ do {
+ struct list_head *pos = READ_ONCE(signal->link.prev);
+ struct i915_request *prev;
+
+ /* Confirm signal has not been retired, the link is valid */
+ if (unlikely(i915_request_started(signal)))
+ break;
+
+ /* Is signal the earliest request on its timeline? */
+ if (pos == &rcu_dereference(signal->timeline)->requests)
+ break;
/*
* Peek at the request before us in the timeline. That
@@ -791,20 +815,25 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
* after acquiring a reference to it, confirm that it is
* still part of the signaler's timeline.
*/
- if (i915_request_get_rcu(prev)) {
- if (list_next_entry(prev, link) == signal)
- fence = &prev->fence;
- else
- i915_request_put(prev);
+ prev = list_entry(pos, typeof(*prev), link);
+ if (!i915_request_get_rcu(prev))
+ break;
+
+ /* After the strong barrier, confirm prev is still attached */
+ if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
+ i915_request_put(prev);
+ break;
}
- }
+
+ fence = &prev->fence;
+ } while (0);
spin_unlock_irq(&signal->lock);
rcu_read_unlock();
if (!fence)
return 0;
err = 0;
- if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+ if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
err = i915_sw_fence_await_dma_fence(&rq->submit,
fence, 0,
I915_FENCE_GFP);
@@ -1242,8 +1271,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
0);
}
- list_add_tail(&rq->link, &timeline->requests);
-
/*
* Make sure that no request gazumped us - if it was allocated after
* our i915_request_alloc() and called __i915_request_add() before
@@ -1303,9 +1330,9 @@ void __i915_request_queue(struct i915_request *rq,
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
- i915_sw_fence_commit(&rq->semaphore);
if (attr && rq->engine->schedule)
rq->engine->schedule(rq, attr);
+ i915_sw_fence_commit(&rq->semaphore);
i915_sw_fence_commit(&rq->submit);
}
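Editor's note: the semaphore completion callback can fire deep inside the signaler's irq-off locks, where re-entering the scheduler to bump priority risks lock recursion; the hunk therefore grabs a reference, queues an irq_work, and performs the bump from irq_semaphore_cb() once those locks have been released. The deferral pattern on its own (init_irq_work()/irq_work_queue() are the real kernel API; the req type and ref helpers are abbreviations):

	static void cb(struct irq_work *wrk)
	{
		struct req *rq = container_of(wrk, struct req, work);

		bump_priority(rq);   /* now outside the signaler's locks */
		put_ref(rq);         /* pairs with the get below         */
	}

	/* in the fence-complete path: */
	get_ref(rq);
	init_irq_work(&rq->work, cb);
	irq_work_queue(&rq->work);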
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index f57eadcf3583..fccc339949ec 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -26,6 +26,7 @@
#define I915_REQUEST_H
#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
#include <linux/lockdep.h>
#include "gem/i915_gem_context_types.h"
@@ -208,6 +209,7 @@ struct i915_request {
};
struct list_head execute_cb;
struct i915_sw_fence semaphore;
+ struct irq_work semaphore_work;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index b0ade76bec90..d34141f7dcd8 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -234,6 +234,11 @@ static inline u64 ptr_to_u64(const void *ptr)
__idx; \
})
+static inline bool is_power_of_2_u64(u64 n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
static inline void __list_del_many(struct list_head *head,
struct list_head *first)
{
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 0dfcd1787e65..fe85e487e477 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -486,6 +486,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (mtk_crtc->cmdq_client) {
+ mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
@@ -636,10 +637,18 @@ static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
static int mtk_drm_crtc_init(struct drm_device *drm,
struct mtk_drm_crtc *mtk_crtc,
- struct drm_plane *primary,
- struct drm_plane *cursor, unsigned int pipe)
+ unsigned int pipe)
{
- int ret;
+ struct drm_plane *primary = NULL;
+ struct drm_plane *cursor = NULL;
+ int i, ret;
+
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
+ primary = &mtk_crtc->planes[i];
+ else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
+ cursor = &mtk_crtc->planes[i];
+ }
ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
&mtk_crtc_funcs, NULL);
@@ -689,11 +698,12 @@ static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
}
static inline
-enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx)
+enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
+ unsigned int num_planes)
{
if (plane_idx == 0)
return DRM_PLANE_TYPE_PRIMARY;
- else if (plane_idx == 1)
+ else if (plane_idx == (num_planes - 1))
return DRM_PLANE_TYPE_CURSOR;
else
return DRM_PLANE_TYPE_OVERLAY;
@@ -712,7 +722,8 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
ret = mtk_plane_init(drm_dev,
&mtk_crtc->planes[mtk_crtc->layer_nr],
BIT(pipe),
- mtk_drm_crtc_plane_type(mtk_crtc->layer_nr),
+ mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
+ num_planes),
mtk_ddp_comp_supported_rotations(comp));
if (ret)
return ret;
@@ -807,9 +818,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
return ret;
}
- ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
- mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
- NULL, pipe);
+ ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
if (ret < 0)
return ret;
@@ -828,7 +837,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
drm_crtc_index(&mtk_crtc->base));
mtk_crtc->cmdq_client = NULL;
}
- ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events",
+ ret = of_property_read_u32_index(priv->mutex_node,
+ "mediatek,gce-events",
drm_crtc_index(&mtk_crtc->base),
&mtk_crtc->cmdq_event);
if (ret)
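Editor's note: with an OVL_2L component a CRTC can carry more than two planes, so hard-coding index 1 as the cursor was wrong; the last plane is now the cursor, and mtk_drm_crtc_init() locates the primary and cursor planes by type instead of by position. The assignment rule by itself (illustrative enum):

	enum role { PRIMARY, OVERLAY, CURSOR };

	static enum role plane_role(unsigned int idx, unsigned int num_planes)
	{
		if (idx == 0)
			return PRIMARY;
		if (idx == num_planes - 1)
			return CURSOR;
		return OVERLAY;       /* everything in between */
	}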
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 1f5a112bb034..57c88de9a329 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -471,6 +471,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
/* Only DMA capable components need the LARB property */
comp->larb_dev = NULL;
if (type != MTK_DISP_OVL &&
+ type != MTK_DISP_OVL_2L &&
type != MTK_DISP_RDMA &&
type != MTK_DISP_WDMA)
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 914cc7619cd7..c2bd683a87c8 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -80,6 +80,7 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc_state *crtc_state;
+ int ret;
if (plane != state->crtc->cursor)
return -EINVAL;
@@ -90,6 +91,11 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
+ ret = mtk_drm_crtc_plane_check(state->crtc, plane,
+ to_mtk_plane_state(state));
+ if (ret)
+ return ret;
+
if (state->state)
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
@@ -115,6 +121,7 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
plane->state->src_y = new_state->src_y;
plane->state->src_h = new_state->src_h;
plane->state->src_w = new_state->src_w;
+ swap(plane->state->fb, new_state->fb);
state->pending.async_dirty = true;
mtk_drm_crtc_async_update(new_state->crtc, plane, new_state);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 3107b0738e40..5d75f8cf6477 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -601,33 +601,27 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
source_id = (fault_status >> 16);
/* Page fault only */
- if ((status & mask) == BIT(i)) {
- WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
-
+ ret = -1;
+ if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
- if (!ret) {
- mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
- status &= ~mask;
- continue;
- }
- }
- /* terminal fault, print info about the fault */
- dev_err(pfdev->dev,
- "Unhandled Page fault in AS%d at VA 0x%016llX\n"
- "Reason: %s\n"
- "raw fault status: 0x%X\n"
- "decoded fault status: %s\n"
- "exception type 0x%X: %s\n"
- "access type 0x%X: %s\n"
- "source id 0x%X\n",
- i, addr,
- "TODO",
- fault_status,
- (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
- exception_type, panfrost_exception_name(pfdev, exception_type),
- access_type, access_type_name(pfdev, fault_status),
- source_id);
+ if (ret)
+ /* terminal fault, print info about the fault */
+ dev_err(pfdev->dev,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status: 0x%X\n"
+ "decoded fault status: %s\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n",
+ i, addr,
+ "TODO",
+ fault_status,
+ (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+ exception_type, panfrost_exception_name(pfdev, exception_type),
+ access_type, access_type_name(pfdev, fault_status),
+ source_id);
mmu_write(pfdev, MMU_INT_CLEAR, mask);
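
The rewritten test folds the removed WARN_ON() range check into the fault classification itself: (exception_type & 0xF8) == 0xC0 is true exactly for codes 0xC0..0xC7, which contains the translation-fault codes 0xC1..0xC4 that were previously asserted. A stand-alone check of the mask (illustrative, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	for (uint32_t x = 0xBE; x <= 0xC9; x++)
		printf("0x%02X -> %s\n", x,
		       (x & 0xF8) == 0xC0 ? "page fault" : "other");
	return 0;
}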
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index fd74e2611185..8696af1ee14d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -37,6 +37,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
@@ -325,6 +326,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned long flags = 0;
+ struct drm_device *dev;
int ret;
if (!ent)
@@ -365,7 +367,44 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- return drm_get_pci_dev(pdev, ent, &kms_driver);
+ dev = drm_dev_alloc(&kms_driver, &pdev->dev);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free;
+
+ dev->pdev = pdev;
+#ifdef __alpha__
+ dev->hose = pdev->sysdata;
+#endif
+
+ pci_set_drvdata(pdev, dev);
+
+ if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+ dev->agp = drm_agp_init(dev);
+ if (dev->agp) {
+ dev->agp->agp_mtrr = arch_phys_wc_add(
+ dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024);
+ }
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_agp;
+
+ return 0;
+
+err_agp:
+ if (dev->agp)
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ kfree(dev->agp);
+ pci_disable_device(pdev);
+err_free:
+ drm_dev_put(dev);
+ return ret;
}
static void
@@ -575,7 +614,7 @@ radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER,
+ DRIVER_GEM | DRIVER_RENDER,
.load = radeon_driver_load_kms,
.open = radeon_driver_open_kms,
.postclose = radeon_driver_postclose_kms,
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d24f23a81656..dd2f19b8022b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include <linux/vga_switcheroo.h>
+#include <drm/drm_agpsupport.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -77,6 +78,11 @@ void radeon_driver_unload_kms(struct drm_device *dev)
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
+ if (dev->agp)
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ kfree(dev->agp);
+ dev->agp = NULL;
+
done_free:
kfree(rdev);
dev->dev_private = NULL;
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 7c24f8f832a5..4a64f7ae437a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -107,48 +107,128 @@ static const struct de2_fmt_info de2_formats[] = {
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XRGB4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_ABGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XBGR4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_RGBA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_RGBX4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_BGRA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_BGRX4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_ARGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XRGB1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_ABGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XBGR1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_RGBA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_RGBX5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_BGRA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_BGRX5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_ARGB2101010,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_ABGR2101010,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_RGBA1010102,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_BGRA1010102,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_UYVY,
.de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
.rgb = false,
@@ -197,12 +277,6 @@ static const struct de2_fmt_info de2_formats[] = {
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
- .drm_fmt = DRM_FORMAT_YUV444,
- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
- },
- {
.drm_fmt = DRM_FORMAT_YUV422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
.rgb = false,
@@ -221,12 +295,6 @@ static const struct de2_fmt_info de2_formats[] = {
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
- .drm_fmt = DRM_FORMAT_YVU444,
- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
- },
- {
.drm_fmt = DRM_FORMAT_YVU422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
.rgb = false,
@@ -244,6 +312,18 @@ static const struct de2_fmt_info de2_formats[] = {
.rgb = false,
.csc = SUN8I_CSC_MODE_YVU2RGB,
},
+ {
+ .drm_fmt = DRM_FORMAT_P010,
+ .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_P210,
+ .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
};
const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index c6cc94057faf..345b28b0a80a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -93,6 +93,10 @@
#define SUN8I_MIXER_FBFMT_ABGR1555 17
#define SUN8I_MIXER_FBFMT_RGBA5551 18
#define SUN8I_MIXER_FBFMT_BGRA5551 19
+#define SUN8I_MIXER_FBFMT_ARGB2101010 20
+#define SUN8I_MIXER_FBFMT_ABGR2101010 21
+#define SUN8I_MIXER_FBFMT_RGBA1010102 22
+#define SUN8I_MIXER_FBFMT_BGRA1010102 23
#define SUN8I_MIXER_FBFMT_YUYV 0
#define SUN8I_MIXER_FBFMT_UYVY 1
@@ -109,6 +113,13 @@
/* format 12 is semi-planar YUV411 UVUV */
/* format 13 is semi-planar YUV411 VUVU */
#define SUN8I_MIXER_FBFMT_YUV411 14
+/* format 15 doesn't exist */
+/* format 16 is P010 YVU */
+#define SUN8I_MIXER_FBFMT_P010_YUV 17
+/* format 18 is P210 YVU */
+#define SUN8I_MIXER_FBFMT_P210_YUV 19
+/* format 20 is packed YVU444 10-bit */
+/* format 21 is packed YUV444 10-bit */
/*
* Sub-engines listed below are unused for now. The EN registers are here only
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 42d445d23773..b8398ca18b0f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -398,24 +398,66 @@ static const struct drm_plane_funcs sun8i_vi_layer_funcs = {
};
/*
- * While all RGB formats are supported, VI planes don't support
- * alpha blending, so there is no point having formats with alpha
- * channel if their opaque analog exist.
+ * While the DE2 VI layer supports the same RGB formats as the UI
+ * layer, the alpha channel is ignored. This structure lists all
+ * unique variants where the alpha channel is replaced with a
+ * "don't care" (X) channel.
*/
static const u32 sun8i_vi_layer_formats[] = {
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_BGRX4444,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XRGB8888,
+
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV411,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU411,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU422,
+};
+
+static const u32 sun8i_vi_layer_de3_formats[] = {
DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
+ DRM_FORMAT_BGRA1010102,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBA1010102,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
@@ -424,6 +466,8 @@ static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV61,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P210,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
@@ -431,11 +475,9 @@ static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
- DRM_FORMAT_YUV444,
DRM_FORMAT_YVU411,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
- DRM_FORMAT_YVU444,
};
struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
@@ -443,19 +485,27 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
int index)
{
u32 supported_encodings, supported_ranges;
+ unsigned int plane_cnt, format_count;
struct sun8i_vi_layer *layer;
- unsigned int plane_cnt;
+ const u32 *formats;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
if (!layer)
return ERR_PTR(-ENOMEM);
+ if (mixer->cfg->is_de3) {
+ formats = sun8i_vi_layer_de3_formats;
+ format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
+ } else {
+ formats = sun8i_vi_layer_formats;
+ format_count = ARRAY_SIZE(sun8i_vi_layer_formats);
+ }
+
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun8i_vi_layer_funcs,
- sun8i_vi_layer_formats,
- ARRAY_SIZE(sun8i_vi_layer_formats),
+ formats, format_count,
NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 49ed55779128..953c82a4f573 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -515,6 +515,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base.base.resv = &fbo->base.base._resv;
dma_resv_init(&fbo->base.base._resv);
+ fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 017a9e0fc3bb..3af7ec80c7da 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -42,8 +42,8 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
* "f91a9dd35715 Fix unlinking resources from hash
* table." (Feb 2019) fixes the bug.
*/
- static int handle;
- handle++;
+ static atomic_t seqno = ATOMIC_INIT(0);
+ int handle = atomic_inc_return(&seqno);
*resid = handle + 1;
} else {
int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
@@ -99,6 +99,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
return NULL;
bo->base.base.funcs = &virtio_gpu_gem_funcs;
+ bo->base.map_cached = true;
return &bo->base.base;
}
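
The resource-id hunk above swaps a bare static counter, which concurrent callers can increment and read racily, for atomic_inc_return(), which hands every caller a distinct value. The pattern in isolation (names are illustrative):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t seqno = ATOMIC_INIT(0);

/* Safe from any number of concurrent contexts; the old
 * `static int handle; handle++;` was not. */
static u32 next_resource_id(void)
{
	return (u32)atomic_inc_return(&seqno) + 1;
}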
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index dddfca555df9..0b6ee1dee625 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -193,8 +193,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
goto cleanup;
/* The pointer is not NULL when we resume from hibernation */
- if (input_device->hid_desc != NULL)
- kfree(input_device->hid_desc);
+ kfree(input_device->hid_desc);
input_device->hid_desc = kmemdup(desc, desc->bLength, GFP_ATOMIC);
if (!input_device->hid_desc)
@@ -207,8 +206,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
}
/* The pointer is not NULL when we resume from hibernation */
- if (input_device->report_desc != NULL)
- kfree(input_device->report_desc);
+ kfree(input_device->report_desc);
input_device->report_desc = kzalloc(input_device->report_desc_size,
GFP_ATOMIC);
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 9632e2e3c4bb..319a0519ebdb 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -413,7 +413,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
return 0x95;
break;
}
- return -ENODEV;
+ return 0;
}
/* Provide labels for sysfs */
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
index ecd9b65627ec..660556b89e9f 100644
--- a/drivers/hwmon/pmbus/xdpe12284.c
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -18,6 +18,59 @@
#define XDPE122_AMD_625MV 0x10 /* AMD mode 6.25mV */
#define XDPE122_PAGE_NUM 2
+static int xdpe122_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ long val;
+ s16 exponent;
+ s32 mantissa;
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Convert register value to LINEAR11 data. */
+ exponent = ((s16)ret) >> 11;
+ mantissa = ((s16)((ret & GENMASK(10, 0)) << 5)) >> 5;
+ val = mantissa * 1000L;
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ /* Convert data to VID register. */
+ switch (info->vrm_version[page]) {
+ case vr13:
+ if (val >= 500)
+ return 1 + DIV_ROUND_CLOSEST(val - 500, 10);
+ return 0;
+ case vr12:
+ if (val >= 250)
+ return 1 + DIV_ROUND_CLOSEST(val - 250, 5);
+ return 0;
+ case imvp9:
+ if (val >= 200)
+ return 1 + DIV_ROUND_CLOSEST(val - 200, 10);
+ return 0;
+ case amd625mv:
+ if (val >= 200 && val <= 1550)
+ return DIV_ROUND_CLOSEST((1550 - val) * 100,
+ 625);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
static int xdpe122_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
@@ -70,6 +123,7 @@ static struct pmbus_driver_info xdpe122_info = {
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
.identify = xdpe122_identify,
+ .read_word_data = xdpe122_read_word_data,
};
static int xdpe122_probe(struct i2c_client *client,
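
The new read_word_data hook first decodes the PMBus LINEAR11 word (5-bit signed exponent, 11-bit signed mantissa) into a millivolt value, then re-encodes it as a VID code for the selected VR version. The decode step can be checked in isolation; a user-space sketch with an assumed register value:

#include <stdio.h>
#include <stdint.h>

static long linear11_decode(uint16_t reg)
{
	int16_t exponent = (int16_t)reg >> 11;	/* arithmetic shift keeps the sign */
	int32_t mantissa = ((int16_t)((reg & 0x7ff) << 5)) >> 5; /* sign-extend 11 bits */
	long val = mantissa * 1000L;

	if (exponent >= 0)
		val <<= exponent;
	else
		val >>= -exponent;
	return val;
}

int main(void)
{
	/* 0xD3C0: exponent -6, mantissa 960 -> 960 * 1000 / 64 = 15000 */
	printf("%ld\n", linear11_decode(0xD3C0));
	return 0;
}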
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 5255d3755411..1de23b4f3809 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -171,7 +171,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
/* SCL Low Time */
writel(t_low, idev->base + ALTR_I2C_SCL_LOW);
/* SDA Hold Time, 300ns */
- writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD);
+ writel(3 * clk_mhz / 10, idev->base + ALTR_I2C_SDA_HOLD);
/* Mask all master interrupt bits */
altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 16a67a64284a..b426fc956938 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -78,25 +78,6 @@
#define X1000_I2C_DC_STOP BIT(9)
-static const char * const jz4780_i2c_abrt_src[] = {
- "ABRT_7B_ADDR_NOACK",
- "ABRT_10ADDR1_NOACK",
- "ABRT_10ADDR2_NOACK",
- "ABRT_XDATA_NOACK",
- "ABRT_GCALL_NOACK",
- "ABRT_GCALL_READ",
- "ABRT_HS_ACKD",
- "SBYTE_ACKDET",
- "ABRT_HS_NORSTRT",
- "SBYTE_NORSTRT",
- "ABRT_10B_RD_NORSTRT",
- "ABRT_MASTER_DIS",
- "ARB_LOST",
- "SLVFLUSH_TXFIFO",
- "SLV_ARBLOST",
- "SLVRD_INTX",
-};
-
#define JZ4780_I2C_INTST_IGC BIT(11)
#define JZ4780_I2C_INTST_ISTT BIT(10)
#define JZ4780_I2C_INTST_ISTP BIT(9)
@@ -576,21 +557,8 @@ done:
static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src)
{
- int i;
-
- dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src);
- dev_err(&i2c->adap.dev, "device addr=%x\n",
- jz4780_i2c_readw(i2c, JZ4780_I2C_TAR));
- dev_err(&i2c->adap.dev, "send cmd count:%d %d\n",
- i2c->cmd, i2c->cmd_buf[i2c->cmd]);
- dev_err(&i2c->adap.dev, "receive data count:%d %d\n",
- i2c->cmd, i2c->data_buf[i2c->cmd]);
-
- for (i = 0; i < 16; i++) {
- if (src & BIT(i))
- dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n",
- i, jz4780_i2c_abrt_src[i]);
- }
+ dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n",
+ src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]);
}
static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 1bb99b556393..05c26986637b 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -361,7 +361,7 @@ static const struct block_device_operations ide_gd_ops = {
.release = ide_gd_release,
.ioctl = ide_gd_ioctl,
#ifdef CONFIG_COMPAT
- .ioctl = ide_gd_compat_ioctl,
+ .compat_ioctl = ide_gd_compat_ioctl,
#endif
.getgeo = ide_gd_getgeo,
.check_events = ide_gd_check_events,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 68cc1b2d6824..15e99a888427 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1191,6 +1191,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
/* Sharing an ib_cm_id with different handlers is not
* supported */
spin_unlock_irqrestore(&cm.lock, flags);
+ ib_destroy_cm_id(cm_id);
return ERR_PTR(-EINVAL);
}
refcount_inc(&cm_id_priv->refcount);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 72f032160c4b..2dec3a02ab9f 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3212,19 +3212,26 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
+ memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
if (id_priv->state == RDMA_CM_IDLE) {
ret = cma_bind_addr(id, src_addr, dst_addr);
- if (ret)
+ if (ret) {
+ memset(cma_dst_addr(id_priv), 0,
+ rdma_addr_size(dst_addr));
return ret;
+ }
}
- if (cma_family(id_priv) != dst_addr->sa_family)
+ if (cma_family(id_priv) != dst_addr->sa_family) {
+ memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
return -EINVAL;
+ }
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+ memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
return -EINVAL;
+ }
- memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
} else {
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index b1457b3464d3..cf42acca4a3a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -338,6 +338,20 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
qp->pd = pd;
qp->uobject = uobj;
qp->real_qp = qp;
+
+ qp->qp_type = attr->qp_type;
+ qp->rwq_ind_tbl = attr->rwq_ind_tbl;
+ qp->send_cq = attr->send_cq;
+ qp->recv_cq = attr->recv_cq;
+ qp->srq = attr->srq;
+ qp->rwq_ind_tbl = attr->rwq_ind_tbl;
+ qp->event_handler = attr->event_handler;
+
+ atomic_set(&qp->usecnt, 0);
+ spin_lock_init(&qp->mr_lock);
+ INIT_LIST_HEAD(&qp->rdma_mrs);
+ INIT_LIST_HEAD(&qp->sig_mrs);
+
/*
* We don't track XRC QPs for now, because they don't have PD
* and more importantly they are created internally by the driver,
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index ade71823370f..da8adadf4755 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -159,8 +159,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
struct list_head *e, *tmp;
- list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
+ list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
+ list_del(e);
kfree(list_entry(e, struct iwcm_work, free_list));
+ }
}
static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 37b433aa7306..e0b0a91da696 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1757,6 +1757,8 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (ret)
goto err_msg;
} else {
+ if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
+ goto err_msg;
qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 4fad732f9b3c..06e5b6787443 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -273,6 +273,23 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return 1;
}
+static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
+ u32 sg_cnt, enum dma_data_direction dir)
+{
+ if (is_pci_p2pdma_page(sg_page(sg)))
+ pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
+ else
+ ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+}
+
+static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
+ u32 sg_cnt, enum dma_data_direction dir)
+{
+ if (is_pci_p2pdma_page(sg_page(sg)))
+ return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+ return ib_dma_map_sg(dev, sg, sg_cnt, dir);
+}
+
/**
* rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
* @ctx: context to initialize
@@ -295,11 +312,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
struct ib_device *dev = qp->pd->device;
int ret;
- if (is_pci_p2pdma_page(sg_page(sg)))
- ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
- else
- ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
-
+ ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
if (!ret)
return -ENOMEM;
sg_cnt = ret;
@@ -338,7 +351,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
return ret;
out_unmap_sg:
- ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+ rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
@@ -588,11 +601,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
break;
}
- if (is_pci_p2pdma_page(sg_page(sg)))
- pci_p2pdma_unmap_sg(qp->pd->device->dma_device, sg,
- sg_cnt, dir);
- else
- ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 2b4d80393bd0..2d5608315dc8 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -340,15 +340,19 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
return NULL;
if (qp_attr_mask & IB_QP_PORT)
- new_pps->main.port_num =
- (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
+ new_pps->main.port_num = qp_attr->port_num;
+ else if (qp_pps)
+ new_pps->main.port_num = qp_pps->main.port_num;
+
if (qp_attr_mask & IB_QP_PKEY_INDEX)
- new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
- qp_attr->pkey_index;
+ new_pps->main.pkey_index = qp_attr->pkey_index;
+ else if (qp_pps)
+ new_pps->main.pkey_index = qp_pps->main.pkey_index;
+
if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
new_pps->main.state = IB_PORT_PKEY_VALID;
- if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
+ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
new_pps->main.port_num = qp_pps->main.port_num;
new_pps->main.pkey_index = qp_pps->main.pkey_index;
if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
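
The last hunk is a logical-vs-bitwise operator fix: IB_QP_PKEY_INDEX || IB_QP_PORT evaluates to the boolean constant 1, so the old expression only ever tested bit 0 of qp_attr_mask. A stand-alone demonstration with illustrative flag values (not the real enum values):

#include <stdio.h>

#define QP_PKEY_INDEX (1 << 4)	/* illustrative */
#define QP_PORT       (1 << 5)	/* illustrative */

int main(void)
{
	int qp_attr_mask = QP_PORT;

	/* Buggy: (A || B) == 1, so this checks bit 0 only -> prints 1. */
	printf("buggy: %d\n", !(qp_attr_mask & (QP_PKEY_INDEX || QP_PORT)));
	/* Fixed: checks whether either flag is set -> prints 0. */
	printf("fixed: %d\n", !(qp_attr_mask & (QP_PKEY_INDEX | QP_PORT)));
	return 0;
}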
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index b8c657b28380..cd656ad4953b 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -181,14 +181,28 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
odp_data->page_shift = PAGE_SHIFT;
odp_data->notifier.ops = ops;
+ /*
+ * An mmget must be held when registering a notifier; the owning_mm
+ * only has an mm_grab reference at this point.
+ */
+ if (!mmget_not_zero(umem->owning_mm)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
odp_data->tgid = get_pid(root->tgid);
ret = ib_init_umem_odp(odp_data, ops);
- if (ret) {
- put_pid(odp_data->tgid);
- kfree(odp_data);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto out_tgid;
+ mmput(umem->owning_mm);
return odp_data;
+
+out_tgid:
+ put_pid(odp_data->tgid);
+ mmput(umem->owning_mm);
+out_free:
+ kfree(odp_data);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_alloc_child);
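
The added error path also encodes a lifetime rule: registering a notifier needs the whole address space pinned (mm_users > 0), which the mmgrab()-style reference held so far does not guarantee. The take-and-drop shape of the fix, as a hedged sketch with a hypothetical callback:

#include <linux/sched/mm.h>

/* Run @op with mm_users pinned; fails if the process has already exited. */
static int with_mm_alive(struct mm_struct *mm, int (*op)(struct mm_struct *))
{
	int ret;

	if (!mmget_not_zero(mm))
		return -EFAULT;
	ret = op(mm);
	mmput(mm);
	return ret;
}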
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 025933752e1d..060b4ebbd2ba 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1445,16 +1445,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (ret)
goto err_cb;
- qp->pd = pd;
- qp->send_cq = attr.send_cq;
- qp->recv_cq = attr.recv_cq;
- qp->srq = attr.srq;
- qp->rwq_ind_tbl = ind_tbl;
- qp->event_handler = attr.event_handler;
- qp->qp_type = attr.qp_type;
- atomic_set(&qp->usecnt, 0);
atomic_inc(&pd->usecnt);
- qp->port = 0;
if (attr.send_cq)
atomic_inc(&attr.send_cq->usecnt);
if (attr.recv_cq)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 3ebae3b65c28..e62c9dfc7837 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1185,16 +1185,6 @@ struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
if (ret)
goto err;
- qp->qp_type = qp_init_attr->qp_type;
- qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
-
- atomic_set(&qp->usecnt, 0);
- qp->mrs_used = 0;
- spin_lock_init(&qp->mr_lock);
- INIT_LIST_HEAD(&qp->rdma_mrs);
- INIT_LIST_HEAD(&qp->sig_mrs);
- qp->port = 0;
-
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
struct ib_qp *xrc_qp =
create_xrc_qp_user(qp, qp_init_attr, udata);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 089e201d7550..2f6323ad9c59 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -515,10 +515,11 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
opa_get_lid(packet->dlid, 9B));
if (!mcast)
goto drop;
+ rcu_read_lock();
list_for_each_entry_rcu(p, &mcast->qp_list, list) {
packet->qp = p->qp;
if (hfi1_do_pkey_check(packet))
- goto drop;
+ goto unlock_drop;
spin_lock_irqsave(&packet->qp->r_lock, flags);
packet_handler = qp_ok(packet);
if (likely(packet_handler))
@@ -527,6 +528,7 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
ibp->rvp.n_pkt_drops++;
spin_unlock_irqrestore(&packet->qp->r_lock, flags);
}
+ rcu_read_unlock();
/*
* Notify rvt_multicast_detach() if it is waiting for us
* to finish.
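
The added rcu_read_lock()/rcu_read_unlock() pair is what makes the list_for_each_entry_rcu() walk legal: RCU only guarantees entries stay valid while a read-side critical section is held. The minimal shape of the pattern (illustrative types):

#include <linux/rculist.h>

struct mcast_qp {
	struct list_head list;
};

static void walk_qp_list(struct list_head *qp_list)
{
	struct mcast_qp *p;

	rcu_read_lock();
	list_for_each_entry_rcu(p, qp_list, list) {
		/* p may be freed once the read section ends */
	}
	rcu_read_unlock();
}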
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e4bcfa81b70a..4de380f3d581 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3570,7 +3570,8 @@ static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d9bffcc93587..bb78142bca5e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -636,6 +636,7 @@ struct mlx5_ib_mr {
/* For ODP and implicit */
atomic_t num_deferred_work;
+ wait_queue_head_t q_deferred_work;
struct xarray implicit_children;
union {
struct rcu_head rcu;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 4216814ba871..bf50cd91f472 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -235,7 +235,8 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
mr->parent = NULL;
mlx5_mr_cache_free(mr->dev, mr);
ib_umem_odp_release(odp);
- atomic_dec(&imr->num_deferred_work);
+ if (atomic_dec_and_test(&imr->num_deferred_work))
+ wake_up(&imr->q_deferred_work);
}
static void free_implicit_child_mr_work(struct work_struct *work)
@@ -554,6 +555,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
imr->umem = &umem_odp->umem;
imr->is_odp_implicit = true;
atomic_set(&imr->num_deferred_work, 0);
+ init_waitqueue_head(&imr->q_deferred_work);
xa_init(&imr->implicit_children);
err = mlx5_ib_update_xlt(imr, 0,
@@ -611,10 +613,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
* under xa_lock while the child is in the xarray. Thus at this point
* it is only decreasing, and all work holding it is now on the wq.
*/
- if (atomic_read(&imr->num_deferred_work)) {
- flush_workqueue(system_unbound_wq);
- WARN_ON(atomic_read(&imr->num_deferred_work));
- }
+ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
/*
* Fence the imr before we destroy the children. This allows us to
@@ -645,10 +644,7 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
/* Wait for all running page-fault handlers to finish. */
synchronize_srcu(&mr->dev->odp_srcu);
- if (atomic_read(&mr->num_deferred_work)) {
- flush_workqueue(system_unbound_wq);
- WARN_ON(atomic_read(&mr->num_deferred_work));
- }
+ wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work));
dma_fence_odp_mr(mr);
}
@@ -1720,7 +1716,8 @@ static void destroy_prefetch_work(struct prefetch_mr_work *work)
u32 i;
for (i = 0; i < work->num_sge; ++i)
- atomic_dec(&work->frags[i].mr->num_deferred_work);
+ if (atomic_dec_and_test(&work->frags[i].mr->num_deferred_work))
+ wake_up(&work->frags[i].mr->q_deferred_work);
kvfree(work);
}
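
Both hunks replace flushing the shared system_unbound_wq with a counted wait: each deferred item drops num_deferred_work on completion and the last one wakes the drainer, so the wait covers exactly this MR's work and nothing else. The two halves of the pattern (illustrative, not the driver code):

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t num_deferred_work = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(q_deferred_work);

static void deferred_work_put(void)
{
	/* The last item to finish wakes the drainer. */
	if (atomic_dec_and_test(&num_deferred_work))
		wake_up(&q_deferred_work);
}

static void deferred_work_drain(void)
{
	wait_event(q_deferred_work, !atomic_read(&num_deferred_work));
}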
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 33778d451b82..5ef93f8f17a1 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -329,8 +329,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
if (mcast == NULL)
goto drop;
this_cpu_inc(ibp->pmastats->n_multicast_rcv);
+ rcu_read_lock();
list_for_each_entry_rcu(p, &mcast->qp_list, list)
qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
+ rcu_read_unlock();
/*
* Notify rvt_multicast_detach() if it is waiting for us
* to finish.
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 96ed349c0939..5cd40fb9e20c 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -388,6 +388,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
{ .max_segment_size = SZ_2G };
base_dev->num_comp_vectors = num_possible_cpus();
+ xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
+
ib_set_device_ops(base_dev, &siw_device_ops);
rv = ib_device_set_netdev(base_dev, netdev, 1);
if (rv)
@@ -415,9 +418,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
sdev->attrs.max_srq_sge = SIW_MAX_SGE;
- xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
- xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
-
INIT_LIST_HEAD(&sdev->cep_list);
INIT_LIST_HEAD(&sdev->qp_list);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 63e4f9d15fd9..a10a0c2ca2da 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -213,6 +213,8 @@ static int ipoib_get_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops ipoib_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_link_ksettings = ipoib_get_link_ksettings,
.get_drvinfo = ipoib_get_drvinfo,
.get_coalesce = ipoib_get_coalesce,
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index f277e467156f..2c6515e3ecf1 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -445,6 +445,11 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
path->name = kasprintf(GFP_KERNEL, "%s-%s",
src_node->name, dst_node->name);
+ if (!path->name) {
+ kfree(path);
+ return ERR_PTR(-ENOMEM);
+ }
+
return path;
}
EXPORT_SYMBOL_GPL(of_icc_get);
@@ -579,6 +584,10 @@ struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
}
path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
+ if (!path->name) {
+ kfree(path);
+ path = ERR_PTR(-ENOMEM);
+ }
out:
mutex_unlock(&icc_lock);
return path;
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 8c744578122a..a0d87ed9da69 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -300,9 +300,11 @@ static int control_loop(void *dummy)
/* i2c probing and setup */
/************************************************************************/
-static int
-do_attach( struct i2c_adapter *adapter )
+static void do_attach(struct i2c_adapter *adapter)
{
+ struct i2c_board_info info = { };
+ struct device_node *np;
+
/* scan 0x48-0x4f (DS1775) and 0x2c-0x2f (ADM1030) */
static const unsigned short scan_ds1775[] = {
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
@@ -313,25 +315,24 @@ do_attach( struct i2c_adapter *adapter )
I2C_CLIENT_END
};
- if( strncmp(adapter->name, "uni-n", 5) )
- return 0;
-
- if( !x.running ) {
- struct i2c_board_info info;
+ if (x.running || strncmp(adapter->name, "uni-n", 5))
+ return;
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "therm_ds1775", I2C_NAME_SIZE);
+ np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775");
+ if (np) {
+ of_node_put(np);
+ } else {
+ strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
i2c_new_probed_device(adapter, &info, scan_ds1775, NULL);
+ }
- strlcpy(info.type, "therm_adm1030", I2C_NAME_SIZE);
+ np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030");
+ if (np) {
+ of_node_put(np);
+ } else {
+ strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
i2c_new_probed_device(adapter, &info, scan_adm1030, NULL);
-
- if( x.thermostat && x.fan ) {
- x.running = 1;
- x.poll_task = kthread_run(control_loop, NULL, "g4fand");
- }
}
- return 0;
}
static int
@@ -404,8 +405,8 @@ out:
enum chip { ds1775, adm1030 };
static const struct i2c_device_id therm_windtunnel_id[] = {
- { "therm_ds1775", ds1775 },
- { "therm_adm1030", adm1030 },
+ { "MAC,ds1775", ds1775 },
+ { "MAC,adm1030", adm1030 },
{ }
};
MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id);
@@ -414,6 +415,7 @@ static int
do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = cl->adapter;
+ int ret = 0;
if( !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA
| I2C_FUNC_SMBUS_WRITE_BYTE) )
@@ -421,11 +423,19 @@ do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
switch (id->driver_data) {
case adm1030:
- return attach_fan( cl );
+ ret = attach_fan(cl);
+ break;
case ds1775:
- return attach_thermostat(cl);
+ ret = attach_thermostat(cl);
+ break;
}
- return 0;
+
+ if (!x.running && x.thermostat && x.fan) {
+ x.running = 1;
+ x.poll_task = kthread_run(control_loop, NULL, "g4fand");
+ }
+
+ return ret;
}
static struct i2c_driver g4fan_driver = {
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 8bc1faf71ff2..a1df0d95151c 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -67,7 +67,6 @@
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
-#include <linux/sched/signal.h>
#include <trace/events/bcache.h>
#define MAX_OPEN_BUCKETS 128
@@ -734,21 +733,8 @@ int bch_open_buckets_alloc(struct cache_set *c)
int bch_cache_allocator_start(struct cache *ca)
{
- struct task_struct *k;
-
- /*
- * In case previous btree check operation occupies too many
- * system memory for bcache btree node cache, and the
- * registering process is selected by OOM killer. Here just
- * ignore the SIGKILL sent by OOM killer if there is, to
- * avoid kthread_run() being failed by pending signals. The
- * bcache registering process will exit after the registration
- * done.
- */
- if (signal_pending(current))
- flush_signals(current);
-
- k = kthread_run(bch_allocator_thread, ca, "bcache_allocator");
+ struct task_struct *k = kthread_run(bch_allocator_thread,
+ ca, "bcache_allocator");
if (IS_ERR(k))
return PTR_ERR(k);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index b12186c87f52..fa872df4e770 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -34,7 +34,6 @@
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
-#include <linux/sched/signal.h>
#include <linux/rculist.h>
#include <linux/delay.h>
#include <trace/events/bcache.h>
@@ -1914,18 +1913,6 @@ static int bch_gc_thread(void *arg)
int bch_gc_thread_start(struct cache_set *c)
{
- /*
- * In case previous btree check operation occupies too many
- * system memory for bcache btree node cache, and the
- * registering process is selected by OOM killer. Here just
- * ignore the SIGKILL sent by OOM killer if there is, to
- * avoid kthread_run() being failed by pending signals. The
- * bcache registering process will exit after the registration
- * done.
- */
- if (signal_pending(current))
- flush_signals(current);
-
c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
return PTR_ERR_OR_ZERO(c->gc_thread);
}
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index c82578af56a5..2ea0360108e1 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -20,8 +20,13 @@
struct dm_bio_details {
struct gendisk *bi_disk;
u8 bi_partno;
+ int __bi_remaining;
unsigned long bi_flags;
struct bvec_iter bi_iter;
+ bio_end_io_t *bi_end_io;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ struct bio_integrity_payload *bi_integrity;
+#endif
};
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
@@ -30,6 +35,11 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
bd->bi_partno = bio->bi_partno;
bd->bi_flags = bio->bi_flags;
bd->bi_iter = bio->bi_iter;
+ bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
+ bd->bi_end_io = bio->bi_end_io;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ bd->bi_integrity = bio_integrity(bio);
+#endif
}
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
@@ -38,6 +48,11 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
bio->bi_partno = bd->bi_partno;
bio->bi_flags = bd->bi_flags;
bio->bi_iter = bd->bi_iter;
+ atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
+ bio->bi_end_io = bd->bi_end_io;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ bio->bi_integrity = bd->bi_integrity;
+#endif
}
#endif
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 2d32821b3a5b..d3bb355819a4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2846,8 +2846,8 @@ static void cache_postsuspend(struct dm_target *ti)
prevent_background_work(cache);
BUG_ON(atomic_read(&cache->nr_io_migrations));
- cancel_delayed_work(&cache->waker);
- flush_workqueue(cache->wq);
+ cancel_delayed_work_sync(&cache->waker);
+ drain_workqueue(cache->wq);
WARN_ON(cache->tracker.in_flight);
/*
@@ -3492,7 +3492,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {2, 1, 0},
+ .version = {2, 2, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index b225b3e445fa..2f03fecd312d 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -6,6 +6,8 @@
* This file is released under the GPL.
*/
+#include "dm-bio-record.h"
+
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
@@ -201,17 +203,19 @@ struct dm_integrity_c {
__u8 log2_blocks_per_bitmap_bit;
unsigned char mode;
- int suspending;
int failed;
struct crypto_shash *internal_hash;
+ struct dm_target *ti;
+
/* these variables are locked with endio_wait.lock */
struct rb_root in_progress;
struct list_head wait_list;
wait_queue_head_t endio_wait;
struct workqueue_struct *wait_wq;
+ struct workqueue_struct *offload_wq;
unsigned char commit_seq;
commit_id_t commit_ids[N_COMMIT_IDS];
@@ -293,11 +297,7 @@ struct dm_integrity_io {
struct completion *completion;
- struct gendisk *orig_bi_disk;
- u8 orig_bi_partno;
- bio_end_io_t *orig_bi_end_io;
- struct bio_integrity_payload *orig_bi_integrity;
- struct bvec_iter orig_bi_iter;
+ struct dm_bio_details bio_details;
};
struct journal_completion {
@@ -1439,7 +1439,7 @@ static void dec_in_flight(struct dm_integrity_io *dio)
dio->range.logical_sector += dio->range.n_sectors;
bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
INIT_WORK(&dio->work, integrity_bio_wait);
- queue_work(ic->wait_wq, &dio->work);
+ queue_work(ic->offload_wq, &dio->work);
return;
}
do_endio_flush(ic, dio);
@@ -1450,14 +1450,9 @@ static void integrity_end_io(struct bio *bio)
{
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
- bio->bi_iter = dio->orig_bi_iter;
- bio->bi_disk = dio->orig_bi_disk;
- bio->bi_partno = dio->orig_bi_partno;
- if (dio->orig_bi_integrity) {
- bio->bi_integrity = dio->orig_bi_integrity;
+ dm_bio_restore(&dio->bio_details, bio);
+ if (bio->bi_integrity)
bio->bi_opf |= REQ_INTEGRITY;
- }
- bio->bi_end_io = dio->orig_bi_end_io;
if (dio->completion)
complete(dio->completion);
@@ -1542,7 +1537,7 @@ static void integrity_metadata(struct work_struct *w)
}
}
- __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
+ __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos;
char *mem, *checksums_ptr;
@@ -1586,7 +1581,7 @@ again:
if (likely(checksums != checksums_onstack))
kfree(checksums);
} else {
- struct bio_integrity_payload *bip = dio->orig_bi_integrity;
+ struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
if (bip) {
struct bio_vec biv;
@@ -1865,7 +1860,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
if (need_sync_io && from_map) {
INIT_WORK(&dio->work, integrity_bio_wait);
- queue_work(ic->metadata_wq, &dio->work);
+ queue_work(ic->offload_wq, &dio->work);
return;
}
@@ -2005,20 +2000,13 @@ offload_to_thread:
} else
dio->completion = NULL;
- dio->orig_bi_iter = bio->bi_iter;
-
- dio->orig_bi_disk = bio->bi_disk;
- dio->orig_bi_partno = bio->bi_partno;
+ dm_bio_record(&dio->bio_details, bio);
bio_set_dev(bio, ic->dev->bdev);
-
- dio->orig_bi_integrity = bio_integrity(bio);
bio->bi_integrity = NULL;
bio->bi_opf &= ~REQ_INTEGRITY;
-
- dio->orig_bi_end_io = bio->bi_end_io;
bio->bi_end_io = integrity_end_io;
-
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
+
generic_make_request(bio);
if (need_sync_io) {
@@ -2315,7 +2303,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
- if (READ_ONCE(ic->suspending) && !ic->meta_dev)
+ if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
return;
spin_lock_irq(&ic->endio_wait.lock);
@@ -2376,7 +2364,7 @@ static void integrity_recalc(struct work_struct *w)
next_chunk:
- if (unlikely(READ_ONCE(ic->suspending)))
+ if (unlikely(dm_suspended(ic->ti)))
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
@@ -2501,7 +2489,7 @@ static void bitmap_block_work(struct work_struct *w)
dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
remove_range(ic, &dio->range);
INIT_WORK(&dio->work, integrity_bio_wait);
- queue_work(ic->wait_wq, &dio->work);
+ queue_work(ic->offload_wq, &dio->work);
} else {
block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
dio->range.n_sectors, BITMAP_OP_SET);
@@ -2524,7 +2512,7 @@ static void bitmap_block_work(struct work_struct *w)
remove_range(ic, &dio->range);
INIT_WORK(&dio->work, integrity_bio_wait);
- queue_work(ic->wait_wq, &dio->work);
+ queue_work(ic->offload_wq, &dio->work);
}
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
@@ -2804,8 +2792,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
del_timer_sync(&ic->autocommit_timer);
- WRITE_ONCE(ic->suspending, 1);
-
if (ic->recalc_wq)
drain_workqueue(ic->recalc_wq);
@@ -2834,8 +2820,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
#endif
}
- WRITE_ONCE(ic->suspending, 0);
-
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
ic->journal_uptodate = true;
@@ -2888,17 +2872,24 @@ static void dm_integrity_resume(struct dm_target *ti)
} else {
replay_journal(ic);
if (ic->mode == 'B') {
- int mode;
ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
- mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR;
- block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode);
- block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode);
- block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode);
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
+ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
+ block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
+ block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
+ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
+ block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
+ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
+ block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
+ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
+ }
rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
}
@@ -2967,7 +2958,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" meta_device:%s", ic->meta_dev->name);
if (ic->sectors_per_block != 1)
DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
- if (ic->recalculate_flag)
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
DMEMIT(" recalculate");
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
@@ -3623,6 +3614,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
ti->private = ic;
ti->per_io_data_size = sizeof(struct dm_integrity_io);
+ ic->ti = ti;
ic->in_progress = RB_ROOT;
INIT_LIST_HEAD(&ic->wait_list);
@@ -3836,6 +3828,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
+ METADATA_WORKQUEUE_MAX_ACTIVE);
+ if (!ic->offload_wq) {
+ ti->error = "Cannot allocate workqueue";
+ r = -ENOMEM;
+ goto bad;
+ }
+
ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
if (!ic->commit_wq) {
ti->error = "Cannot allocate workqueue";
@@ -4140,6 +4140,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
destroy_workqueue(ic->metadata_wq);
if (ic->wait_wq)
destroy_workqueue(ic->wait_wq);
+ if (ic->offload_wq)
+ destroy_workqueue(ic->offload_wq);
if (ic->commit_wq)
destroy_workqueue(ic->commit_wq);
if (ic->writer_wq)
@@ -4200,7 +4202,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 2bc18c9c3abc..58fd137b6ae1 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -2053,7 +2053,7 @@ static int multipath_busy(struct dm_target *ti)
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 13, 0},
+ .version = {1, 14, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
DM_TARGET_PASSES_INTEGRITY,
.module = THIS_MODULE,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index fc9947d6210c..76b6b323bf4b 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -960,9 +960,9 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
DMWARN("%s: __commit_transaction() failed, error = %d",
__func__, r);
}
+ pmd_write_unlock(pmd);
if (!pmd->fail_io)
__destroy_persistent_data_objects(pmd);
- pmd_write_unlock(pmd);
kfree(pmd);
return 0;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 0d61e9c67986..eec9f252e935 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1221,7 +1221,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 5, 0},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index b9e27e37a943..a09bdc000e64 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -625,6 +625,12 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
wc->freelist_size++;
}
+static inline void writecache_verify_watermark(struct dm_writecache *wc)
+{
+ if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
+ queue_work(wc->writeback_wq, &wc->writeback_work);
+}
+
static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
struct wc_entry *e;
@@ -650,8 +656,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, s
list_del(&e->lru);
}
wc->freelist_size--;
- if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
- queue_work(wc->writeback_wq, &wc->writeback_work);
+
+ writecache_verify_watermark(wc);
return e;
}
@@ -842,7 +848,7 @@ static void writecache_suspend(struct dm_target *ti)
}
wc_unlock(wc);
- flush_workqueue(wc->writeback_wq);
+ drain_workqueue(wc->writeback_wq);
wc_lock(wc);
if (flush_on_suspend)
@@ -965,6 +971,8 @@ erase_this:
writecache_commit_flushed(wc, false);
}
+ writecache_verify_watermark(wc);
+
wc_unlock(wc);
}
@@ -2312,7 +2320,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
static struct target_type writecache_target = {
.name = "writecache",
- .version = {1, 1, 1},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = writecache_ctr,
.dtr = writecache_dtr,
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 70a1063161c0..f4f83d39b3dc 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -533,8 +533,9 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
/* Get the BIO chunk work. If one is not active yet, create one */
cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
- if (!cw) {
-
+ if (cw) {
+ dmz_get_chunk_work(cw);
+ } else {
/* Create a new chunk work */
cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
if (unlikely(!cw)) {
@@ -543,7 +544,7 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
}
INIT_WORK(&cw->work, dmz_chunk_work);
- refcount_set(&cw->refcount, 0);
+ refcount_set(&cw->refcount, 1);
cw->target = dmz;
cw->chunk = chunk;
bio_list_init(&cw->bio_list);
@@ -556,7 +557,6 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
}
bio_list_add(&cw->bio_list, bio);
- dmz_get_chunk_work(cw);
dmz_reclaim_bio_acc(dmz->reclaim);
if (queue_work(dmz->chunk_wq, &cw->work))
@@ -967,7 +967,7 @@ static int dmz_iterate_devices(struct dm_target *ti,
static struct target_type dmz_type = {
.name = "zoned",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = dmz_ctr,
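
Starting the chunk work refcount at 1 instead of 0 gives the creator its own reference, mirroring the dmz_get_chunk_work() now taken on a lookup hit; a refcount_t that begins at 0 would underflow (and warn) on the first put. The init/get pairing in isolation (illustrative struct):

#include <linux/refcount.h>

struct chunk_work {
	refcount_t refcount;
	/* ... */
};

static void chunk_work_init(struct chunk_work *cw)
{
	refcount_set(&cw->refcount, 1);	/* creator's reference */
}

static void chunk_work_get(struct chunk_work *cw)
{
	refcount_inc(&cw->refcount);	/* additional user of an existing work */
}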
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b89f07ee2eff..0413018c8305 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1788,7 +1788,8 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
* With request-based DM we only need to check the
* top-level queue for congestion.
*/
- r = md->queue->backing_dev_info->wb.state & bdi_bits;
+ struct backing_dev_info *bdi = md->queue->backing_dev_info;
+ r = bdi->wb.congested->state & bdi_bits;
} else {
map = dm_get_live_table_fast(md);
if (map)
@@ -1854,15 +1855,6 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
-static void dm_init_normal_md_queue(struct mapped_device *md)
-{
- /*
- * Initialize aspects of queue that aren't relevant for blk-mq
- */
- md->queue->backing_dev_info->congested_data = md;
- md->queue->backing_dev_info->congested_fn = dm_any_congested;
-}
-
static void cleanup_mapped_device(struct mapped_device *md)
{
if (md->wq)
@@ -2249,6 +2241,12 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
+static void dm_init_congested_fn(struct mapped_device *md)
+{
+ md->queue->backing_dev_info->congested_data = md;
+ md->queue->backing_dev_info->congested_fn = dm_any_congested;
+}
+
/*
* Setup the DM device's queue based on md's type
*/
@@ -2265,11 +2263,12 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
DMERR("Cannot initialize queue for request-based dm-mq mapped device");
return r;
}
+ dm_init_congested_fn(md);
break;
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
case DM_TYPE_NVME_BIO_BASED:
- dm_init_normal_md_queue(md);
+ dm_init_congested_fn(md);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
@@ -2368,6 +2367,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
map = dm_get_live_table(md, &srcu_idx);
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
+ set_bit(DMF_SUSPENDED, &md->flags);
dm_table_postsuspend_targets(map);
}
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 7c429ce98bae..668770e9f609 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -639,9 +639,9 @@ int media_get_pad_index(struct media_entity *entity, bool is_sink,
return -EINVAL;
for (i = 0; i < entity->num_pads; i++) {
- if (entity->pads[i].flags == MEDIA_PAD_FL_SINK)
+ if (entity->pads[i].flags & MEDIA_PAD_FL_SINK)
pad_is_sink = true;
- else if (entity->pads[i].flags == MEDIA_PAD_FL_SOURCE)
+ else if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE)
pad_is_sink = false;
else
continue; /* This is an error! */
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
index 3c93d9232c3c..b6e39fbd8ad5 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c
+++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
@@ -27,17 +27,17 @@ static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
{ V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
- { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_BGRA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_RGBA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
+ { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_HSV},
{ V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB},
};
@@ -175,22 +175,14 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_XRGB32:
case V4L2_PIX_FMT_HSV32:
- rf->cr = rf->luma + 1;
- rf->cb = rf->cr + 2;
- rf->luma += 2;
- break;
- case V4L2_PIX_FMT_BGR32:
- case V4L2_PIX_FMT_XBGR32:
- rf->cb = rf->luma;
- rf->cr = rf->cb + 2;
- rf->luma++;
- break;
case V4L2_PIX_FMT_ARGB32:
rf->alpha = rf->luma;
rf->cr = rf->luma + 1;
rf->cb = rf->cr + 2;
rf->luma += 2;
break;
+ case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_ABGR32:
rf->cb = rf->luma;
rf->cr = rf->cb + 2;
@@ -198,10 +190,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
rf->alpha = rf->cr + 1;
break;
case V4L2_PIX_FMT_BGRX32:
- rf->cb = rf->luma + 1;
- rf->cr = rf->cb + 2;
- rf->luma += 2;
- break;
case V4L2_PIX_FMT_BGRA32:
rf->alpha = rf->luma;
rf->cb = rf->luma + 1;
@@ -209,10 +197,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
rf->luma += 2;
break;
case V4L2_PIX_FMT_RGBX32:
- rf->cr = rf->luma;
- rf->cb = rf->cr + 2;
- rf->luma++;
- break;
case V4L2_PIX_FMT_RGBA32:
rf->alpha = rf->luma + 3;
rf->cr = rf->luma;
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index afda438d4e0a..0655aa9ecf28 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -635,8 +635,6 @@ static void pulse8_cec_adap_free(struct cec_adapter *adap)
cancel_delayed_work_sync(&pulse8->ping_eeprom_work);
cancel_work_sync(&pulse8->irq_work);
cancel_work_sync(&pulse8->tx_work);
- serio_close(pulse8->serio);
- serio_set_drvdata(pulse8->serio, NULL);
kfree(pulse8);
}
@@ -652,6 +650,9 @@ static void pulse8_disconnect(struct serio *serio)
struct pulse8 *pulse8 = serio_get_drvdata(serio);
cec_unregister_adapter(pulse8->adap);
+ pulse8->serio = NULL;
+ serio_set_drvdata(serio, NULL);
+ serio_close(serio);
}
static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
@@ -840,6 +841,8 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
serio_set_drvdata(serio, pulse8);
INIT_WORK(&pulse8->irq_work, pulse8_irq_work_handler);
INIT_WORK(&pulse8->tx_work, pulse8_tx_work_handler);
+ INIT_DELAYED_WORK(&pulse8->ping_eeprom_work,
+ pulse8_ping_eeprom_work_handler);
mutex_init(&pulse8->lock);
spin_lock_init(&pulse8->msg_lock);
pulse8->config_pending = false;
@@ -865,17 +868,16 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
pulse8->restoring_config = true;
}
- INIT_DELAYED_WORK(&pulse8->ping_eeprom_work,
- pulse8_ping_eeprom_work_handler);
schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
return 0;
close_serio:
+ pulse8->serio = NULL;
+ serio_set_drvdata(serio, NULL);
serio_close(serio);
delete_adap:
cec_delete_adapter(pulse8->adap);
- serio_set_drvdata(serio, NULL);
free_device:
kfree(pulse8);
return err;
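Both pulse8 hunks are ordering fixes: INIT_DELAYED_WORK() now runs before anything that can schedule or cancel the work, and disconnect tears down the serio link only after the CEC adapter is unregistered, so the adapter free callback no longer touches a closed serio. A minimal sketch of the init-before-expose pattern, with hypothetical names:

struct foo {
	struct delayed_work ping_work;
};

static void foo_ping_handler(struct work_struct *work)
{
	/* periodic work body */
}

static int foo_connect(struct foo *p)
{
	/* initialize every work item first ... */
	INIT_DELAYED_WORK(&p->ping_work, foo_ping_handler);

	/* ... then register the device; any later failure or teardown
	 * path may now safely call cancel_delayed_work_sync(). */
	return 0;
}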
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 1afd9c6ad908..cc34c5ab7009 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -880,12 +880,12 @@ int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
goto err_rel_entity1;
/* Connect the three entities */
- ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
+ ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret)
goto err_rel_entity2;
- ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
+ ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret)
goto err_rm_links0;
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index 25e5f24b3fec..5bdf57472314 100644
--- a/drivers/misc/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -2112,8 +2112,8 @@ exit_done:
return status;
}
-static int altera_get_note(u8 *p, s32 program_size,
- s32 *offset, char *key, char *value, int length)
+static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
+ char *key, char *value, int keylen, int vallen)
/*
* Gets key and value of NOTE fields in the JBC file.
* Can be called in two modes: if offset pointer is NULL,
@@ -2170,7 +2170,7 @@ static int altera_get_note(u8 *p, s32 program_size,
&p[note_table + (8 * i) + 4])];
if (value != NULL)
- strlcpy(value, value_ptr, length);
+ strlcpy(value, value_ptr, vallen);
}
}
@@ -2189,13 +2189,13 @@ static int altera_get_note(u8 *p, s32 program_size,
strlcpy(key, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i)])],
- length);
+ keylen);
if (value != NULL)
strlcpy(value, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i) + 4])],
- length);
+ vallen);
*offset = i + 1;
}
@@ -2449,7 +2449,7 @@ int altera_init(struct altera_config *config, const struct firmware *fw)
__func__, (format_version == 2) ? "Jam STAPL" :
"pre-standardized Jam 1.1");
while (altera_get_note((u8 *)fw->data, fw->size,
- &offset, key, value, 256) == 0)
+ &offset, key, value, 32, 256) == 0)
printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n",
__func__, key, value);
}
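The altera change fixes a one-bound-for-two-buffers bug: a single length parameter covered both strlcpy() destinations even though the caller passes a 32-byte key buffer and a 256-byte value buffer, so a 256-byte limit applied to the key copy could overrun it. A sketch of the bug class (sizes from the call site above; src/src2 stand in for the NOTE strings):

	char key[32], value[256];

	/* before: one bound for both copies -- a 256-byte limit on the
	 * 32-byte 'key' buffer can overflow it */
	strlcpy(key, src, 256);

	/* after: each destination carries its own bound */
	strlcpy(key, src, sizeof(key));		/* 32 */
	strlcpy(value, src2, sizeof(value));	/* 256 */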
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index c3a160c18047..3955fa5db43c 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1590,7 +1590,7 @@ static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
return 0;
}
-void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
{
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 5eea8d70a85d..ce15a05f23d4 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -262,10 +262,26 @@ static int gl9750_execute_tuning(struct sdhci_host *host, u32 opcode)
return 0;
}
+static void gli_pcie_enable_msi(struct sdhci_pci_slot *slot)
+{
+ int ret;
+
+ ret = pci_alloc_irq_vectors(slot->chip->pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (ret < 0) {
+ pr_warn("%s: enable PCI MSI failed, error=%d\n",
+ mmc_hostname(slot->host->mmc), ret);
+ return;
+ }
+
+ slot->host->irq = pci_irq_vector(slot->chip->pdev, 0);
+}
+
static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot)
{
struct sdhci_host *host = slot->host;
+ gli_pcie_enable_msi(slot);
slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
sdhci_enable_v4_mode(host);
@@ -276,6 +292,7 @@ static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot)
{
struct sdhci_host *host = slot->host;
+ gli_pcie_enable_msi(slot);
slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
sdhci_enable_v4_mode(host);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 66e410e58c8e..02565bc2be8a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -444,6 +444,8 @@ source "drivers/net/fddi/Kconfig"
source "drivers/net/hippi/Kconfig"
+source "drivers/net/ipa/Kconfig"
+
config NET_SB1000
tristate "General Instruments Surfboard 1000"
depends on PNP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 65967246f240..94b60800887a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
obj-$(CONFIG_HAMRADIO) += hamradio/
+obj-$(CONFIG_QCOM_IPA) += ipa/
obj-$(CONFIG_PLIP) += plip/
obj-$(CONFIG_PPP) += ppp/
obj-$(CONFIG_PPP_ASYNC) += ppp/
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 15337e9d4fad..cc0703c3d57f 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -250,6 +250,9 @@ static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);
+ /* As setup_udp_tunnel_sock() does not call udp_encap_enable if the
+ * socket type is v6, an explicit call to udp_encap_enable is needed.
+ */
if (sock->sk->sk_family == AF_INET6)
udp_encap_enable();
@@ -556,10 +559,17 @@ static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf)
+static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
+ struct netlink_ext_ack *extack)
{
- if (!data[IFLA_BAREUDP_PORT] || !data[IFLA_BAREUDP_ETHERTYPE])
+ if (!data[IFLA_BAREUDP_PORT]) {
+ NL_SET_ERR_MSG(extack, "port not specified");
return -EINVAL;
+ }
+ if (!data[IFLA_BAREUDP_ETHERTYPE]) {
+ NL_SET_ERR_MSG(extack, "ethertype not specified");
+ return -EINVAL;
+ }
if (data[IFLA_BAREUDP_PORT])
conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
@@ -635,7 +645,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
struct bareudp_conf conf;
int err;
- err = bareudp2info(data, &conf);
+ err = bareudp2info(data, &conf, extack);
if (err)
return err;
@@ -801,6 +811,7 @@ static void __exit bareudp_cleanup_module(void)
}
module_exit(bareudp_cleanup_module);
+MODULE_ALIAS_RTNL_LINK("bareudp");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 1cc2cd894f87..c81698550e5a 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -50,11 +50,6 @@ struct arp_pkt {
};
#pragma pack()
-static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
-{
- return (struct arp_pkt *)skb_network_header(skb);
-}
-
/* Forward declaration */
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
bool strict_match);
@@ -553,10 +548,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
spin_unlock(&bond->mode_lock);
}
-static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
+static struct slave *rlb_choose_channel(struct sk_buff *skb,
+ struct bonding *bond,
+ const struct arp_pkt *arp)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- struct arp_pkt *arp = arp_pkt(skb);
struct slave *assigned_slave, *curr_active_slave;
struct rlb_client_info *client_info;
u32 hash_index = 0;
@@ -653,8 +649,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
*/
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
- struct arp_pkt *arp = arp_pkt(skb);
struct slave *tx_slave = NULL;
+ struct arp_pkt *arp;
+
+ if (!pskb_network_may_pull(skb, sizeof(*arp)))
+ return NULL;
+ arp = (struct arp_pkt *)skb_network_header(skb);
/* Don't modify or load balance ARPs that do not originate locally
* (e.g., arrive via a bridge).
@@ -664,7 +664,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected rx channel */
- tx_slave = rlb_choose_channel(skb, bond);
+ tx_slave = rlb_choose_channel(skb, bond, arp);
if (tx_slave)
bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
tx_slave->dev->addr_len);
@@ -676,7 +676,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
* When the arp reply is received the entry will be updated
* with the correct unicast address of the client.
*/
- tx_slave = rlb_choose_channel(skb, bond);
+ tx_slave = rlb_choose_channel(skb, bond, arp);
/* The ARP reply packets must be delayed so that
* they can cancel out the influence of the ARP request.
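rlb_arp_xmit() used to cast skb_network_header() straight to struct arp_pkt, trusting that the whole ARP header was already present in the linear area; the fix validates that assumption first and threads the checked pointer through to rlb_choose_channel(). The validate-before-cast idiom as a standalone sketch:

static struct arp_pkt *arp_hdr_checked(struct sk_buff *skb)
{
	/* make sure sizeof(struct arp_pkt) bytes of the network header
	 * are present in the linear area before dereferencing them */
	if (!pskb_network_may_pull(skb, sizeof(struct arp_pkt)))
		return NULL;

	return (struct arp_pkt *)skb_network_header(skb);
}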
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 6ee06a49fb4c..68834a2853c9 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -883,6 +883,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
= { .len = sizeof(struct can_bittiming) },
[IFLA_CAN_DATA_BITTIMING_CONST]
= { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
};
static int can_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 2f5c287eac95..686d853fc249 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -348,11 +348,8 @@ static void slcan_write_wakeup(struct tty_struct *tty)
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
- if (!sl)
- goto out;
-
- schedule_work(&sl->tx_work);
-out:
+ if (sl)
+ schedule_work(&sl->tx_work);
rcu_read_unlock();
}
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index d8fda4a02640..fd1d6676ae4f 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -67,7 +67,7 @@ static void port_r_cnt(struct ksz_device *dev, int port)
static void ksz_mib_read_work(struct work_struct *work)
{
struct ksz_device *dev = container_of(work, struct ksz_device,
- mib_read);
+ mib_read.work);
struct ksz_port_mib *mib;
struct ksz_port *p;
int i;
@@ -93,32 +93,24 @@ static void ksz_mib_read_work(struct work_struct *work)
p->read = false;
mutex_unlock(&mib->cnt_mutex);
}
-}
-
-static void mib_monitor(struct timer_list *t)
-{
- struct ksz_device *dev = from_timer(dev, t, mib_read_timer);
- mod_timer(&dev->mib_read_timer, jiffies + dev->mib_read_interval);
- schedule_work(&dev->mib_read);
+ schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
}
void ksz_init_mib_timer(struct ksz_device *dev)
{
int i;
+ INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
+
/* Read MIB counters every 30 seconds to avoid overflow. */
dev->mib_read_interval = msecs_to_jiffies(30000);
- INIT_WORK(&dev->mib_read, ksz_mib_read_work);
- timer_setup(&dev->mib_read_timer, mib_monitor, 0);
-
for (i = 0; i < dev->mib_port_cnt; i++)
dev->dev_ops->port_init_cnt(dev, i);
/* Start the timer 2 seconds later. */
- dev->mib_read_timer.expires = jiffies + msecs_to_jiffies(2000);
- add_timer(&dev->mib_read_timer);
+ schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
@@ -152,7 +144,7 @@ void ksz_adjust_link(struct dsa_switch *ds, int port,
/* Read all MIB counters when the link is going down. */
if (!phydev->link) {
p->read = true;
- schedule_work(&dev->mib_read);
+ schedule_delayed_work(&dev->mib_read, 0);
}
mutex_lock(&dev->dev_mutex);
if (!phydev->link)
@@ -477,10 +469,8 @@ EXPORT_SYMBOL(ksz_switch_register);
void ksz_switch_remove(struct ksz_device *dev)
{
/* timer started */
- if (dev->mib_read_timer.expires) {
- del_timer_sync(&dev->mib_read_timer);
- flush_work(&dev->mib_read);
- }
+ if (dev->mib_read_interval)
+ cancel_delayed_work_sync(&dev->mib_read);
dev->dev_ops->exit(dev);
dsa_unregister_switch(dev->ds);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index a20ebb749377..f2c9bb68fd33 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -80,8 +80,7 @@ struct ksz_device {
struct vlan_table *vlan_cache;
struct ksz_port *ports;
- struct timer_list mib_read_timer;
- struct work_struct mib_read;
+ struct delayed_work mib_read;
unsigned long mib_read_interval;
u16 br_member;
u16 member;
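The ksz conversion collapses a timer-plus-work pair into one delayed_work: a timer callback runs in atomic context and cannot perform the sleeping register I/O the MIB reads need, which is why the old code bounced through schedule_work(); a self-rearming delayed_work does both jobs. The generic pattern, with hypothetical names:

struct dev_state {
	struct delayed_work poll;
	unsigned long interval;
};

static void poll_fn(struct work_struct *work)
{
	struct dev_state *s = container_of(work, struct dev_state, poll.work);

	/* sleeping I/O is fine here, unlike in a timer callback */

	schedule_delayed_work(&s->poll, s->interval);	/* re-arm */
}

/* start: INIT_DELAYED_WORK(&s->poll, poll_fn);
 *        schedule_delayed_work(&s->poll, initial_delay);
 * stop:  cancel_delayed_work_sync(&s->poll); */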
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 86818ab3bb07..a6e365348459 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1222,6 +1222,64 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
+static int mt7530_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 val;
+
+ /* Check for an existing entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ val = mt7530_read(priv, MT7530_MFC);
+
+ /* MT7530 only supports one monitor port */
+ if (val & MIRROR_EN && MIRROR_PORT(val) != mirror->to_local_port)
+ return -EEXIST;
+
+ val |= MIRROR_EN;
+ val &= ~MIRROR_MASK;
+ val |= mirror->to_local_port;
+ mt7530_write(priv, MT7530_MFC, val);
+
+ val = mt7530_read(priv, MT7530_PCR_P(port));
+ if (ingress) {
+ val |= PORT_RX_MIR;
+ priv->mirror_rx |= BIT(port);
+ } else {
+ val |= PORT_TX_MIR;
+ priv->mirror_tx |= BIT(port);
+ }
+ mt7530_write(priv, MT7530_PCR_P(port), val);
+
+ return 0;
+}
+
+static void mt7530_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 val;
+
+ val = mt7530_read(priv, MT7530_PCR_P(port));
+ if (mirror->ingress) {
+ val &= ~PORT_RX_MIR;
+ priv->mirror_rx &= ~BIT(port);
+ } else {
+ val &= ~PORT_TX_MIR;
+ priv->mirror_tx &= ~BIT(port);
+ }
+ mt7530_write(priv, MT7530_PCR_P(port), val);
+
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = mt7530_read(priv, MT7530_MFC);
+ val &= ~MIRROR_EN;
+ mt7530_write(priv, MT7530_MFC, val);
+ }
+}
+
static enum dsa_tag_protocol
mtk_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
@@ -1613,6 +1671,8 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_prepare = mt7530_port_vlan_prepare,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
+ .port_mirror_add = mt7530_port_mirror_add,
+ .port_mirror_del = mt7530_port_mirror_del,
.phylink_validate = mt7530_phylink_validate,
.phylink_mac_link_state = mt7530_phylink_mac_link_state,
.phylink_mac_config = mt7530_phylink_mac_config,
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index ccb9da8cad0d..b7cfb3d52b1c 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -36,6 +36,9 @@ enum {
#define CPU_EN BIT(7)
#define CPU_PORT(x) ((x) << 4)
#define CPU_MASK (0xf << 4)
+#define MIRROR_EN BIT(3)
+#define MIRROR_PORT(x) ((x) & 0x7)
+#define MIRROR_MASK 0x7
/* Registers for address table access */
#define MT7530_ATA1 0x74
@@ -141,6 +144,8 @@ enum mt7530_stp_state {
/* Register for port control */
#define MT7530_PCR_P(x) (0x2004 + ((x) * 0x100))
+#define PORT_TX_MIR BIT(9)
+#define PORT_RX_MIR BIT(8)
#define PORT_VLAN(x) ((x) & 0x3)
enum mt7530_port_mode {
@@ -460,6 +465,8 @@ struct mt7530_priv {
phy_interface_t p6_interface;
phy_interface_t p5_interface;
unsigned int p5_intf_sel;
+ u8 mirror_rx;
+ u8 mirror_tx;
struct mt7530_port ports[MT7530_NUM_PORTS];
/* protects register access among processes */
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 483db9d133c3..fb4c97a58bd4 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2830,6 +2830,8 @@ static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
goto unlock;
}
+ occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
+
unlock:
mv88e6xxx_reg_unlock(chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 01503014b1ee..8fd483020c5b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -1099,6 +1099,13 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
{
int err, irq, virq;
+ chip->g2_irq.masked = ~0;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked);
+ mv88e6xxx_reg_unlock(chip);
+ if (err)
+ return err;
+
chip->g2_irq.domain = irq_domain_add_simple(
chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
if (!chip->g2_irq.domain)
@@ -1108,7 +1115,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
irq_create_mapping(chip->g2_irq.domain, irq);
chip->g2_irq.chip = mv88e6xxx_g2_irq_chip;
- chip->g2_irq.masked = ~0;
chip->device_irq = irq_find_mapping(chip->g1_irq.domain,
MV88E6XXX_G1_STS_IRQ_DEVICE);
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 7e66821b05b4..69546383a382 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -2,6 +2,7 @@
/* Copyright 2019 NXP Semiconductors
*/
#include <uapi/linux/if_bridge.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
@@ -401,6 +402,9 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->stats_layout = felix->info->stats_layout;
ocelot->num_stats = felix->info->num_stats;
ocelot->shared_queue_sz = felix->info->shared_queue_sz;
+ ocelot->vcap_is2_keys = felix->info->vcap_is2_keys;
+ ocelot->vcap_is2_actions = felix->info->vcap_is2_actions;
+ ocelot->vcap = felix->info->vcap;
ocelot->ops = felix->info->ops;
port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
@@ -512,12 +516,22 @@ static int felix_setup(struct dsa_switch *ds)
for (port = 0; port < ds->num_ports; port++) {
ocelot_init_port(ocelot, port);
+ /* Bring up the CPU port module and configure the NPI port */
if (dsa_is_cpu_port(ds, port))
- ocelot_set_cpu_port(ocelot, port,
- OCELOT_TAG_PREFIX_NONE,
- OCELOT_TAG_PREFIX_LONG);
+ ocelot_configure_cpu(ocelot, port,
+ OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_LONG);
}
+ /* Include the CPU port module in the forwarding mask for unknown
+ * unicast - the hardware default value for ANA_FLOODING_FLD_UNICAST
+ * excludes BIT(ocelot->num_phys_ports), and so does ocelot_init, since
+ * Ocelot relies on whitelisting MAC addresses towards PGID_CPU.
+ */
+ ocelot_write_rix(ocelot,
+ ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+ ANA_PGID_PGID, PGID_UC);
+
/* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
* isn't instantiated for the Felix PF.
* In-band AN may take a few ms to complete, so we need to poll.
@@ -595,6 +609,30 @@ static bool felix_txtstamp(struct dsa_switch *ds, int port,
return false;
}
+static int felix_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+}
+
+static int felix_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
+}
+
+static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
+}
+
static const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.setup = felix_setup,
@@ -626,6 +664,9 @@ static const struct dsa_switch_ops felix_switch_ops = {
.port_hwtstamp_set = felix_hwtstamp_set,
.port_rxtstamp = felix_rxtstamp,
.port_txtstamp = felix_txtstamp,
+ .cls_flower_add = felix_cls_flower_add,
+ .cls_flower_del = felix_cls_flower_del,
+ .cls_flower_stats = felix_cls_flower_stats,
};
static struct felix_info *felix_instance_tbl[] = {
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 3a7580015b62..82d46f260041 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -18,6 +18,9 @@ struct felix_info {
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int num_ports;
+ struct vcap_field *vcap_is2_keys;
+ struct vcap_field *vcap_is2_actions;
+ const struct vcap_props *vcap;
int switch_pci_bar;
int imdio_pci_bar;
int (*mdio_bus_alloc)(struct ocelot *ocelot);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 93800e81cdd4..b4078f3c5c38 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -3,12 +3,17 @@
* Copyright 2018-2019 NXP Semiconductors
*/
#include <linux/fsl/enetc_mdio.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include "felix.h"
+#define VSC9959_VCAP_IS2_CNT 1024
+#define VSC9959_VCAP_IS2_ENTRY_WIDTH 376
+#define VSC9959_VCAP_PORT_CNT 6
+
/* TODO: should find a better place for these */
#define USXGMII_BMCR_RESET BIT(15)
#define USXGMII_BMCR_AN_EN BIT(12)
@@ -547,6 +552,129 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
{ .offset = 0x111, .name = "drop_green_prio_7", },
};
+struct vcap_field vsc9959_vcap_is2_keys[] = {
+ /* Common: 41 bits */
+ [VCAP_IS2_TYPE] = { 0, 4},
+ [VCAP_IS2_HK_FIRST] = { 4, 1},
+ [VCAP_IS2_HK_PAG] = { 5, 8},
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 7},
+ [VCAP_IS2_HK_RSV2] = { 20, 1},
+ [VCAP_IS2_HK_HOST_MATCH] = { 21, 1},
+ [VCAP_IS2_HK_L2_MC] = { 22, 1},
+ [VCAP_IS2_HK_L2_BC] = { 23, 1},
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 24, 1},
+ [VCAP_IS2_HK_VID] = { 25, 12},
+ [VCAP_IS2_HK_DEI] = { 37, 1},
+ [VCAP_IS2_HK_PCP] = { 38, 3},
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 41, 48},
+ [VCAP_IS2_HK_L2_SMAC] = { 89, 48},
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {137, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {153, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {169, 8},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {177, 3},
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {137, 40},
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {137, 40},
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 41, 48},
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 89, 1},
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 90, 1},
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 91, 1},
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 92, 1},
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 93, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 94, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 95, 2},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 97, 32},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {129, 32},
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {161, 1},
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 41, 1},
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 42, 1},
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 43, 1},
+ [VCAP_IS2_HK_L3_OPTIONS] = { 44, 1},
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 45, 1},
+ [VCAP_IS2_HK_L3_TOS] = { 46, 8},
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 54, 32},
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 86, 32},
+ [VCAP_IS2_HK_DIP_EQ_SIP] = {118, 1},
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = {119, 1},
+ [VCAP_IS2_HK_L4_SPORT] = {120, 16},
+ [VCAP_IS2_HK_L4_DPORT] = {136, 16},
+ [VCAP_IS2_HK_L4_RNG] = {152, 8},
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {160, 1},
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {161, 1},
+ [VCAP_IS2_HK_L4_URG] = {162, 1},
+ [VCAP_IS2_HK_L4_ACK] = {163, 1},
+ [VCAP_IS2_HK_L4_PSH] = {164, 1},
+ [VCAP_IS2_HK_L4_RST] = {165, 1},
+ [VCAP_IS2_HK_L4_SYN] = {166, 1},
+ [VCAP_IS2_HK_L4_FIN] = {167, 1},
+ [VCAP_IS2_HK_L4_1588_DOM] = {168, 8},
+ [VCAP_IS2_HK_L4_1588_VER] = {176, 4},
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = {119, 8},
+ [VCAP_IS2_HK_L3_PAYLOAD] = {127, 56},
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 41, 1},
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 42, 128},
+ [VCAP_IS2_HK_IP6_L3_PROTO] = {170, 8},
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = {137, 7},
+ [VCAP_IS2_HK_OAM_VER] = {144, 5},
+ [VCAP_IS2_HK_OAM_OPCODE] = {149, 8},
+ [VCAP_IS2_HK_OAM_FLAGS] = {157, 8},
+ [VCAP_IS2_HK_OAM_MEPID] = {165, 16},
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {181, 1},
+ [VCAP_IS2_HK_OAM_IS_Y1731] = {182, 1},
+};
+
+struct vcap_field vsc9959_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 11},
+ [VCAP_IS2_ACT_REW_OP] = { 31, 9},
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1},
+ [VCAP_IS2_ACT_RSV] = { 41, 2},
+ [VCAP_IS2_ACT_ACL_ID] = { 43, 6},
+ [VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
+};
+
+static const struct vcap_props vsc9959_vcap_props[] = {
+ [VCAP_IS2] = {
+ .tg_width = 2,
+ .sw_count = 4,
+ .entry_count = VSC9959_VCAP_IS2_CNT,
+ .entry_width = VSC9959_VCAP_IS2_ENTRY_WIDTH,
+ .action_count = VSC9959_VCAP_IS2_CNT +
+ VSC9959_VCAP_PORT_CNT + 2,
+ .action_width = 89,
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 44,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .counter_words = 4,
+ .counter_width = 32,
+ },
+};
+
#define VSC9959_INIT_TIMEOUT 50000
#define VSC9959_GCB_RST_SLEEP 100
#define VSC9959_SYS_RAMINIT_SLEEP 80
@@ -1088,6 +1216,9 @@ struct felix_info felix_info_vsc9959 = {
.ops = &vsc9959_ops,
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
+ .vcap_is2_keys = vsc9959_vcap_is2_keys,
+ .vcap_is2_actions = vsc9959_vcap_is2_actions,
+ .vcap = vsc9959_vcap_props,
.shared_queue_sz = 128 * 1024,
.num_ports = 6,
.switch_pci_bar = 4,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index c27cc7b37440..d42f085d4272 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -824,6 +824,7 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, MII);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 100baseT1_Full);
if (mii->xmii_mode[port] == XMII_MODE_RGMII)
phylink_set(mask, 1000baseT_Full);
@@ -1743,7 +1744,8 @@ static void sja1105_teardown(struct dsa_switch *ds)
if (!dsa_is_user_port(ds, port))
continue;
- kthread_destroy_worker(sp->xmit_worker);
+ if (sp->xmit_worker)
+ kthread_destroy_worker(sp->xmit_worker);
}
sja1105_tas_teardown(ds);
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 868265a2ec00..552d4cbf6dbd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -826,6 +826,8 @@ static int ena_set_tunable(struct net_device *netdev,
}
static const struct ethtool_ops ena_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = ena_get_link_ksettings,
.get_drvinfo = ena_get_drvinfo,
.get_msglevel = ena_get_msglevel,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index b23c8ee24ee3..61f39a0e04f9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -450,30 +450,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
unsigned int rx_frames, rx_riwt, rx_usecs;
unsigned int tx_frames;
- /* Check for not supported parameters */
- if ((ec->rx_coalesce_usecs_irq) ||
- (ec->rx_max_coalesced_frames_irq) ||
- (ec->tx_coalesce_usecs) ||
- (ec->tx_coalesce_usecs_irq) ||
- (ec->tx_max_coalesced_frames_irq) ||
- (ec->stats_block_coalesce_usecs) ||
- (ec->use_adaptive_rx_coalesce) ||
- (ec->use_adaptive_tx_coalesce) ||
- (ec->pkt_rate_low) ||
- (ec->rx_coalesce_usecs_low) ||
- (ec->rx_max_coalesced_frames_low) ||
- (ec->tx_coalesce_usecs_low) ||
- (ec->tx_max_coalesced_frames_low) ||
- (ec->pkt_rate_high) ||
- (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) ||
- (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_high) ||
- (ec->rate_sample_interval)) {
- netdev_err(netdev, "unsupported coalescing parameter\n");
- return -EOPNOTSUPP;
- }
-
rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
rx_usecs = ec->rx_coalesce_usecs;
rx_frames = ec->rx_max_coalesced_frames;
@@ -837,6 +813,8 @@ out:
}
static const struct ethtool_ops xgbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = xgbe_get_drvinfo,
.get_msglevel = xgbe_get_msglevel,
.set_msglevel = xgbe_set_msglevel,
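This and the surrounding ethtool conversions share one shape: the driver deletes its hand-rolled "unsupported coalescing parameter" checks and instead declares the fields it honors in supported_coalesce_params, on the assumption that the ethtool core rejects any other field before ->set_coalesce() is invoked. A hedged sketch with hypothetical driver names:

static const struct ethtool_ops example_ethtool_ops = {
	/* the core fails requests touching other fields before the
	 * driver callback runs */
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_coalesce	= example_get_coalesce,
	.set_coalesce	= example_set_coalesce,	/* sees only supported fields */
};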
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 0bdaa0d785b7..6781256a318a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -386,21 +386,10 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
cfg = aq_nic_get_cfg(aq_nic);
- /* This is not yet supported
- */
- if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
- return -EOPNOTSUPP;
-
/* Atlantic only supports timing based coalescing
*/
if (coal->rx_max_coalesced_frames > 1 ||
- coal->rx_coalesce_usecs_irq ||
- coal->rx_max_coalesced_frames_irq)
- return -EOPNOTSUPP;
-
- if (coal->tx_max_coalesced_frames > 1 ||
- coal->tx_coalesce_usecs_irq ||
- coal->tx_max_coalesced_frames_irq)
+ coal->tx_max_coalesced_frames > 1)
return -EOPNOTSUPP;
/* We do not support frame counting. Check this
@@ -742,6 +731,8 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
}
const struct ethtool_ops aq_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
.get_regs = aq_ethtool_get_regs,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 42f0c5c6ec2d..6b4f701e7006 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -225,7 +225,7 @@ struct __packed offload_info {
struct offload_port_info ports;
struct offload_ka_info kas;
struct offload_rr_info rrs;
- u8 buf[0];
+ u8 buf[];
};
struct __packed hw_atl_utils_fw_rpc {
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 0058051ba925..2720bde5034e 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -20,7 +20,7 @@ if NET_VENDOR_ATHEROS
config AG71XX
tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
depends on ATH79
- select PHYLIB
+ select PHYLINK
help
If you wish to compile a kernel for AR7XXX/91XXX and enable
ethernet support, then you should always answer Y to this.
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index e95687a780fb..02b7705393ca 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -32,6 +32,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
+#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
@@ -314,6 +315,8 @@ struct ag71xx {
dma_addr_t stop_desc_dma;
phy_interface_t phy_if_mode;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
struct delayed_work restart_work;
struct timer_list oom_timer;
@@ -845,24 +848,91 @@ static void ag71xx_hw_start(struct ag71xx *ag)
netif_wake_queue(ag->ndev);
}
-static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
+static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct phy_device *phydev = ag->ndev->phydev;
- u32 cfg2;
- u32 ifctl;
- u32 fifo5;
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
- if (!phydev->link && update) {
- ag71xx_hw_stop(ag);
+ if (phylink_autoneg_inband(mode))
return;
- }
if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
ag71xx_fast_reset(ag);
+ if (ag->tx_ring.desc_split) {
+ ag->fifodata[2] &= 0xffff;
+ ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
+}
+
+static void ag71xx_mac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_MII) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set(mask, MII);
+
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ if (state->interface == PHY_INTERFACE_MODE_NA ||
+ state->interface == PHY_INTERFACE_MODE_GMII) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ state->link = 0;
+}
+
+static void ag71xx_mac_an_restart(struct phylink_config *config)
+{
+ /* Not Supported */
+}
+
+static void ag71xx_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
+
+ ag71xx_hw_stop(ag);
+}
+
+static void ag71xx_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
+ u32 cfg2;
+ u32 ifctl;
+ u32 fifo5;
+
cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
- cfg2 |= (phydev->duplex) ? MAC_CFG2_FDX : 0;
+ cfg2 |= duplex ? MAC_CFG2_FDX : 0;
ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
ifctl &= ~(MAC_IFCTL_SPEED);
@@ -870,7 +940,7 @@ static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
fifo5 &= ~FIFO_CFG5_BM;
- switch (phydev->speed) {
+ switch (speed) {
case SPEED_1000:
cfg2 |= MAC_CFG2_IF_1000;
fifo5 |= FIFO_CFG5_BM;
@@ -883,72 +953,38 @@ static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
cfg2 |= MAC_CFG2_IF_10_100;
break;
default:
- WARN(1, "not supported speed %i\n", phydev->speed);
return;
}
- if (ag->tx_ring.desc_split) {
- ag->fifodata[2] &= 0xffff;
- ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
- }
-
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
-
ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
ag71xx_hw_start(ag);
-
- if (update)
- phy_print_status(phydev);
}
-static void ag71xx_phy_link_adjust(struct net_device *ndev)
-{
- struct ag71xx *ag = netdev_priv(ndev);
-
- ag71xx_link_adjust(ag, true);
-}
+static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
+ .validate = ag71xx_mac_validate,
+ .mac_pcs_get_state = ag71xx_mac_pcs_get_state,
+ .mac_an_restart = ag71xx_mac_an_restart,
+ .mac_config = ag71xx_mac_config,
+ .mac_link_down = ag71xx_mac_link_down,
+ .mac_link_up = ag71xx_mac_link_up,
+};
-static int ag71xx_phy_connect(struct ag71xx *ag)
+static int ag71xx_phylink_setup(struct ag71xx *ag)
{
- struct device_node *np = ag->pdev->dev.of_node;
- struct net_device *ndev = ag->ndev;
- struct device_node *phy_node;
- struct phy_device *phydev;
- int ret;
-
- if (of_phy_is_fixed_link(np)) {
- ret = of_phy_register_fixed_link(np);
- if (ret < 0) {
- netif_err(ag, probe, ndev, "Failed to register fixed PHY link: %d\n",
- ret);
- return ret;
- }
+ struct phylink *phylink;
- phy_node = of_node_get(np);
- } else {
- phy_node = of_parse_phandle(np, "phy-handle", 0);
- }
+ ag->phylink_config.dev = &ag->ndev->dev;
+ ag->phylink_config.type = PHYLINK_NETDEV;
- if (!phy_node) {
- netif_err(ag, probe, ndev, "Could not find valid phy node\n");
- return -ENODEV;
- }
-
- phydev = of_phy_connect(ag->ndev, phy_node, ag71xx_phy_link_adjust,
- 0, ag->phy_if_mode);
-
- of_node_put(phy_node);
-
- if (!phydev) {
- netif_err(ag, probe, ndev, "Could not connect to PHY device\n");
- return -ENODEV;
- }
-
- phy_attached_info(phydev);
+ phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
+ ag->phy_if_mode, &ag71xx_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+ ag->phylink = phylink;
return 0;
}
@@ -1239,6 +1275,13 @@ static int ag71xx_open(struct net_device *ndev)
unsigned int max_frame_len;
int ret;
+ ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
+ if (ret) {
+ netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
+ ret);
+ goto err;
+ }
+
max_frame_len = ag71xx_max_frame_len(ndev->mtu);
ag->rx_buf_size =
SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);
@@ -1251,11 +1294,7 @@ static int ag71xx_open(struct net_device *ndev)
if (ret)
goto err;
- ret = ag71xx_phy_connect(ag);
- if (ret)
- goto err;
-
- phy_start(ndev->phydev);
+ phylink_start(ag->phylink);
return 0;
@@ -1268,8 +1307,8 @@ static int ag71xx_stop(struct net_device *ndev)
{
struct ag71xx *ag = netdev_priv(ndev);
- phy_stop(ndev->phydev);
- phy_disconnect(ndev->phydev);
+ phylink_stop(ag->phylink);
+ phylink_disconnect_phy(ag->phylink);
ag71xx_hw_disable(ag);
return 0;
@@ -1414,13 +1453,14 @@ static void ag71xx_restart_work_func(struct work_struct *work)
{
struct ag71xx *ag = container_of(work, struct ag71xx,
restart_work.work);
- struct net_device *ndev = ag->ndev;
rtnl_lock();
ag71xx_hw_disable(ag);
ag71xx_hw_enable(ag);
- if (ndev->phydev->link)
- ag71xx_link_adjust(ag, false);
+
+ phylink_stop(ag->phylink);
+ phylink_start(ag->phylink);
+
rtnl_unlock();
}
@@ -1759,6 +1799,12 @@ static int ag71xx_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
+ err = ag71xx_phylink_setup(ag);
+ if (err) {
+ netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
+ goto err_mdio_remove;
+ }
+
err = register_netdev(ndev);
if (err) {
netif_err(ag, probe, ndev, "unable to register net device\n");
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index a780b7215021..6fb620e25208 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -40,7 +40,6 @@
#include "b44.h"
#define DRV_MODULE_NAME "b44"
-#define DRV_MODULE_VERSION "2.0"
#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
#define B44_DEF_MSG_ENABLE \
@@ -97,7 +96,6 @@
MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
@@ -1791,7 +1789,6 @@ static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *inf
struct ssb_bus *bus = bp->sdev->bus;
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
@@ -2347,8 +2344,6 @@ static int b44_init_one(struct ssb_device *sdev,
instance++;
- pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
-
dev = alloc_etherdev(sizeof(*bp));
if (!dev) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 620cd3fc1fbc..916824cca3fd 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -22,7 +22,6 @@
#include "bcm63xx_enet.h"
static char bcm_enet_driver_name[] = "bcm63xx_enet";
-static char bcm_enet_driver_version[] = "1.0";
static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
@@ -1304,9 +1303,6 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, bcm_enet_driver_version,
- sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
@@ -1706,7 +1702,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (!res_irq || !res_irq_rx || !res_irq_tx)
return -ENODEV;
- ret = 0;
dev = alloc_etherdev(sizeof(*priv));
if (!dev)
return -ENOMEM;
@@ -2529,10 +2524,8 @@ static int bcm_enetsw_get_sset_count(struct net_device *netdev,
static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
- strncpy(drvinfo->version, bcm_enet_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "bcm63xx", 32);
+ strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index e0611cba87f9..af7ce5c5488c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -287,7 +287,6 @@ static void bcm_sysport_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, "0.1", sizeof(info->version));
strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}
@@ -624,8 +623,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
return -EINVAL;
if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
- (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
- ec->use_adaptive_tx_coalesce)
+ (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
return -EINVAL;
for (i = 0; i < dev->num_tx_queues; i++)
@@ -2135,7 +2133,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
return -ENOSPC;
index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
- if (index > RXCHK_BRCM_TAG_MAX)
+ if (index >= RXCHK_BRCM_TAG_MAX)
return -ENOSPC;
/* Location is the classification ID, and index is the position
@@ -2210,6 +2208,9 @@ static int bcm_sysport_set_rxnfc(struct net_device *dev,
}
static const struct ethtool_ops bcm_sysport_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = bcm_sysport_get_drvinfo,
.get_msglevel = bcm_sysport_get_msglvl,
.set_msglevel = bcm_sysport_set_msglvl,
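The rule-set hunk above is an off-by-one in disguise: find_first_zero_bit() returns the bitmap size when every bit is set, so a full filter table yields index == RXCHK_BRCM_TAG_MAX, which the old '>' comparison accepted as a valid slot. A sketch of the boundary case:

static int alloc_filter_slot(unsigned long *filters)
{
	unsigned int index;

	index = find_first_zero_bit(filters, RXCHK_BRCM_TAG_MAX);
	/* a full bitmap returns RXCHK_BRCM_TAG_MAX -- one past the last
	 * valid slot -- so only '>=' rejects it */
	if (index >= RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	return index;
}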
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index dbb7874607ca..e1c236cab2a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -59,8 +59,6 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.6"
-#define DRV_MODULE_RELDATE "January 29, 2014"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -72,13 +70,9 @@
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)
-static char version[] =
- "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
@@ -7048,7 +7042,6 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct bnx2 *bp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
@@ -7819,6 +7812,11 @@ static int bnx2_set_channels(struct net_device *dev,
}
static const struct ethtool_ops bnx2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_drvinfo = bnx2_get_drvinfo,
.get_regs_len = bnx2_get_regs_len,
.get_regs = bnx2_get_regs,
@@ -8562,15 +8560,11 @@ static const struct net_device_ops bnx2_netdev_ops = {
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed = 0;
struct net_device *dev;
struct bnx2 *bp;
int rc;
char str[40];
- if (version_printed++ == 0)
- pr_info("%s", version);
-
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
if (!dev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 6026b53137aa..4f5b2b81be3d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -32,8 +32,14 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
+/* FIXME: Delete the DRV_MODULE_VERSION below, but please be warned
+ * that it is not an easy task because such change has all chances
+ * to break this driver due to amount of abuse of in-kernel interfaces
+ * between modules and FW.
+ *
+ * DO NOT UPDATE DRV_MODULE_VERSION below.
+ */
#define DRV_MODULE_VERSION "1.713.36-0"
-#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 4a0ba6801c9e..7cea33803f7f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1112,13 +1112,6 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
u32 mbi;
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
-
- memset(version, 0, sizeof(version));
- snprintf(version, ETHTOOL_FWVERS_LEN, " storm %d.%d.%d.%d",
- BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION);
- strlcat(info->version, version, sizeof(info->version));
if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
ext_dev_info_offset = SHMEM2_RD(bp,
@@ -3663,6 +3656,7 @@ static int bnx2x_get_ts_info(struct net_device *dev,
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = bnx2x_get_drvinfo,
.get_regs_len = bnx2x_get_regs_len,
.get_regs = bnx2x_get_regs,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 1c26fa962233..db5107e7937c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -81,17 +81,12 @@
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
-static char version[] =
- "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
- DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
"BCM57710/57711/57711E/"
"57712/57712_MF/57800/57800_MF/57810/57810_MF/"
"57840/57840_MF Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
@@ -14480,8 +14475,6 @@ static int __init bnx2x_init(void)
{
int ret;
- pr_info("%s", version);
-
bnx2x_wq = create_singlethread_workqueue("bnx2x");
if (bnx2x_wq == NULL) {
pr_err("Cannot create workqueue\n");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f9a8151f092c..663dcf614004 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -70,12 +70,8 @@
#define BNXT_TX_TIMEOUT (5 * HZ)
-static const char version[] =
- "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
-MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
@@ -2166,6 +2162,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct tx_cmp *txcmp;
cpr->has_more_work = 0;
+ cpr->had_work_done = 1;
while (1) {
int rc;
@@ -2179,7 +2176,6 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- cpr->had_work_done = 1;
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
@@ -2396,7 +2392,7 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
- u64 dbr_type, bool all)
+ u64 dbr_type)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i;
@@ -2405,7 +2401,7 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
struct bnxt_db_info *db;
- if (cpr2 && (all || cpr2->had_work_done)) {
+ if (cpr2 && cpr2->had_work_done) {
db = &cpr2->cp_db;
writeq(db->db_key64 | dbr_type |
RING_CMP(cpr2->cp_raw_cons), db->doorbell);
@@ -2428,22 +2424,16 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
if (cpr->has_more_work) {
cpr->has_more_work = 0;
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
- if (cpr->has_more_work) {
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
- return work_done;
- }
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
- if (napi_complete_done(napi, work_done))
- BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
- return work_done;
}
while (1) {
cons = RING_CMP(raw_cons);
nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
- false);
+ if (cpr->has_more_work)
+ break;
+
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
cpr->cp_raw_cons = raw_cons;
if (napi_complete_done(napi, work_done))
BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
@@ -2463,16 +2453,17 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
cpr2 = cpr->cp_ring_arr[idx];
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
- cpr->has_more_work = cpr2->has_more_work;
+ cpr->has_more_work |= cpr2->has_more_work;
} else {
bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (cpr->has_more_work)
- break;
}
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
- cpr->cp_raw_cons = raw_cons;
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
+ if (raw_cons != cpr->cp_raw_cons) {
+ cpr->cp_raw_cons = raw_cons;
+ BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
+ }
return work_done;
}
@@ -4170,6 +4161,7 @@ static int bnxt_hwrm_to_stderr(u32 hwrm_err)
case HWRM_ERR_CODE_NO_BUFFER:
return -ENOMEM;
case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ case HWRM_ERR_CODE_BUSY:
return -EAGAIN;
case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
return -EOPNOTSUPP;
@@ -5069,10 +5061,8 @@ vnic_mru:
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
- u32 rc = 0;
-
if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
struct hwrm_vnic_free_input req = {0};
@@ -5080,10 +5070,9 @@ static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
req.vnic_id =
cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
}
- return rc;
}
static void bnxt_hwrm_vnic_free(struct bnxt *bp)
@@ -5200,14 +5189,13 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
return rc;
}
-static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
u16 i;
- u32 rc = 0;
struct hwrm_ring_grp_free_input req = {0};
if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
- return 0;
+ return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
@@ -5218,12 +5206,10 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
req.ring_group_id =
cpu_to_le32(bp->grp_info[i].fw_grp_id);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
}
mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
}
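
This and the neighboring hunks apply one pattern: teardown helpers whose firmware status no caller ever checked now return void, so the signature stops implying error handling that never happened. The shape, on a made-up ctx/send_fw_cmd() pair:

/* Before: a status was collected, threaded out, and ignored. */
static int teardown_v1(struct ctx *c)
{
	int rc = send_fw_cmd(c, CMD_FREE);

	c->fw_id = INVALID_ID;
	return rc;		/* no caller ever looked at this */
}

/* After: best-effort by contract; a failed free is left to FW reset. */
static void teardown_v2(struct ctx *c)
{
	send_fw_cmd(c, CMD_FREE);
	c->fw_id = INVALID_ID;
}
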
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
@@ -5847,8 +5833,7 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (bp->hwrm_spec_code < 0x10601)
bp->hw_resc.resv_tx_rings = tx_rings;
- rc = bnxt_hwrm_get_rings(bp);
- return rc;
+ return bnxt_hwrm_get_rings(bp);
}
static int
@@ -5869,8 +5854,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (rc)
return rc;
- rc = bnxt_hwrm_get_rings(bp);
- return rc;
+ return bnxt_hwrm_get_rings(bp);
}
static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
@@ -6030,7 +6014,6 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
{
struct hwrm_func_vf_cfg_input req = {0};
u32 flags;
- int rc;
if (!BNXT_NEW_RM(bp))
return 0;
@@ -6047,8 +6030,8 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -6057,7 +6040,6 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
{
struct hwrm_func_cfg_input req = {0};
u32 flags;
- int rc;
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, stats, vnics);
@@ -6075,8 +6057,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -6315,16 +6297,16 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
return rc;
}
-static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
- int rc = 0, i;
struct hwrm_stat_ctx_free_input req = {0};
+ int i;
if (!bp->bnapi)
- return 0;
+ return;
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- return 0;
+ return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
@@ -6336,14 +6318,13 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
}
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
@@ -6548,8 +6529,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
__le64 *pg_dir;
u32 flags = 0;
u8 *pg_attr;
- int i, rc;
u32 ena;
+ int i;
if (!ctx)
return 0;
@@ -6636,8 +6617,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
@@ -7341,7 +7321,6 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
- int rc;
struct bnxt_pf_info *pf = &bp->pf;
struct hwrm_port_qstats_input req = {0};
@@ -7352,8 +7331,7 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
req.port_id = cpu_to_le16(pf->port_id);
req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
@@ -7507,7 +7485,6 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
struct hwrm_func_cfg_input req = {0};
- int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
@@ -7518,14 +7495,12 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
else
return -EINVAL;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
{
struct hwrm_func_cfg_input req = {0};
- int rc;
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
return 0;
@@ -7537,8 +7512,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
if (size == 128)
req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
@@ -8796,6 +8770,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -8891,14 +8866,12 @@ int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
{
struct hwrm_wol_filter_free_input req = {0};
- int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
req.port_id = cpu_to_le16(bp->pf.port_id);
req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
req.wol_filter_id = bp->wol_filter_id;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
@@ -10982,13 +10955,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
struct bnxt *bp = netdev_priv(dev);
if (netif_running(dev))
- bnxt_close_nic(bp, false, false);
+ bnxt_close_nic(bp, true, false);
dev->mtu = new_mtu;
bnxt_set_ring_params(bp);
if (netif_running(dev))
- return bnxt_open_nic(bp, false, false);
+ return bnxt_open_nic(bp, true, false);
return 0;
}
@@ -11456,6 +11429,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_sriov_disable(bp);
bnxt_dl_fw_reporters_destroy(bp, true);
+ if (BNXT_PF(bp))
+ devlink_port_type_clear(&bp->dl_port);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
bnxt_dl_unregister(bp);
@@ -11755,27 +11730,22 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
struct pci_dev *pdev = bp->pdev;
- int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- u32 dw;
+ u64 qword;
- if (!pos) {
- netdev_info(bp->dev, "Unable do read adapter's DSN\n");
+ qword = pci_get_dsn(pdev);
+ if (!qword) {
+ netdev_info(bp->dev, "Unable to read adapter's DSN\n");
return -EOPNOTSUPP;
}
- /* DSN (two dw) is at an offset of 4 from the cap pos */
- pos += 4;
- pci_read_config_dword(pdev, pos, &dw);
- put_unaligned_le32(dw, &dsn[0]);
- pci_read_config_dword(pdev, pos + 4, &dw);
- put_unaligned_le32(dw, &dsn[4]);
+ put_unaligned_le64(qword, dsn);
+
bp->flags |= BNXT_FLAG_DSN_VALID;
return 0;
}
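
pci_get_dsn() is the PCI core helper that hides the capability walk and the two config-space dword reads, returning 0 when no Device Serial Number capability exists. Equivalent usage, mirroring the hunk above (read_dsn() itself is illustrative):

#include <linux/pci.h>
#include <asm/unaligned.h>

static int read_dsn(struct pci_dev *pdev, u8 dsn[8])
{
	u64 qword = pci_get_dsn(pdev);	/* 0 == capability absent */

	if (!qword)
		return -EOPNOTSUPP;
	/* Same byte layout as the two little-endian dwords read before. */
	put_unaligned_le64(qword, dsn);
	return 0;
}
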
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed;
struct net_device *dev;
struct bnxt *bp;
int rc, max_irqs;
@@ -11783,9 +11753,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_is_bridge(pdev))
return -ENODEV;
- if (version_printed++ == 0)
- pr_info("%s", version);
-
/* Clear any pending DMA transactions from crash kernel
* while loading driver in capture kernel.
*/
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index cabef0b4f5fb..5adc25f0ecb8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,8 +12,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.10.1"
+/* DO NOT CHANGE DRV_VER_* defines
+ * FIXME: Delete them
+ */
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
#define DRV_VER_UPD 1
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fb6f30d0d1d0..e50c67984447 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -39,8 +39,8 @@ static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_pri2cos_cfg_input req = {0};
- int rc = 0, i;
u8 *pri2cos;
+ int i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
@@ -56,8 +56,7 @@ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
qidx = bp->tc_to_qidx[ets->prio_tc[i]];
pri2cos[i] = bp->q_info[qidx].queue_id;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
@@ -93,8 +92,8 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
{
struct hwrm_queue_cos2bw_cfg_input req = {0};
struct bnxt_cos2bw_cfg cos2bw;
- int rc = 0, i;
void *data;
+ int i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
for (i = 0; i < max_tc; i++) {
@@ -128,8 +127,7 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
req.unused_0 = 0;
}
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
@@ -236,7 +234,6 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
unsigned int tc_mask = 0, pri_mask = 0;
u8 i, pri, lltc_count = 0;
bool need_q_remap = false;
- int rc;
if (!my_ets)
return -EINVAL;
@@ -267,15 +264,11 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
}
if (need_q_remap)
- rc = bnxt_queue_remap(bp, tc_mask);
+ bnxt_queue_remap(bp, tc_mask);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
req.flags = cpu_to_le32(pri_mask);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return rc;
-
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index e8fc1671c581..677bab95b937 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1236,7 +1236,6 @@ static void bnxt_get_drvinfo(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
@@ -2007,8 +2006,8 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_nvm_install_update_input install = {0};
const struct firmware *fw;
- int rc, hwrm_err = 0;
u32 item_len;
+ int rc = 0;
u16 index;
bnxt_hwrm_fw_set_time(bp);
@@ -2052,15 +2051,14 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
memcpy(kmem, fw->data, fw->size);
modify.host_src_addr = cpu_to_le64(dma_handle);
- hwrm_err = hwrm_send_message(bp, &modify,
- sizeof(modify),
- FLASH_PACKAGE_TIMEOUT);
+ rc = hwrm_send_message(bp, &modify, sizeof(modify),
+ FLASH_PACKAGE_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
dma_handle);
}
}
release_firmware(fw);
- if (rc || hwrm_err)
+ if (rc)
goto err_exit;
if ((install_type & 0xffff) == 0)
@@ -2069,20 +2067,19 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
install.install_type = cpu_to_le32(install_type);
mutex_lock(&bp->hwrm_cmd_lock);
- hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
- if (hwrm_err) {
+ rc = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (rc) {
u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
if (resp->error_code && error_code ==
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
install.flags |= cpu_to_le16(
NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
- hwrm_err = _hwrm_send_message(bp, &install,
- sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
+ rc = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
}
- if (hwrm_err)
+ if (rc)
goto flash_pkg_exit;
}
@@ -2094,7 +2091,7 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
flash_pkg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
err_exit:
- if (hwrm_err == -EACCES)
+ if (rc == -EACCES)
bnxt_print_admin_err(bp);
return rc;
}
@@ -2607,7 +2604,7 @@ static int bnxt_set_phys_id(struct net_device *dev,
struct bnxt_led_cfg *led_cfg;
u8 led_state;
__le16 duration;
- int i, rc;
+ int i;
if (!bp->num_leds || BNXT_VF(bp))
return -EOPNOTSUPP;
@@ -2633,8 +2630,7 @@ static int bnxt_set_phys_id(struct net_device *dev,
led_cfg->led_blink_off = duration;
led_cfg->led_group_id = bp->leds[i].led_group_id;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
@@ -3473,6 +3469,12 @@ void bnxt_ethtool_free(struct bnxt *bp)
}
const struct ethtool_ops bnxt_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_pauseparam = bnxt_get_pauseparam,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 2aba1e02a8f4..6ea3df6da18c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -138,7 +138,6 @@ static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
struct hwrm_func_cfg_input req = {0};
- int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
return 0;
@@ -149,8 +148,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
else
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 9bec256b0934..523bf4be43cc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -279,7 +279,8 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- struct flow_action *flow_action)
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
{
/* Used to store the L2 rewrite mask for dmac (6 bytes) followed by
* smac (6 bytes) if rewrite of both is specified, otherwise either
@@ -299,6 +300,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
return -EINVAL;
}
+ if (!flow_action_basic_hw_stats_types_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -491,7 +495,8 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
flow->tun_mask.tp_src = match.mask->src;
}
- return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action);
+ return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action,
+ tc_flow_cmd->common.extack);
}
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
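
The extack plumbed through here is what lets flow_action_basic_hw_stats_types_check() report why an offload was refused: rules requesting per-action hardware stats types this NIC cannot honour are rejected up front with -EOPNOTSUPP. The gate, reduced to its essentials (check_actions() is an illustrative wrapper):

#include <net/flow_offload.h>

static int check_actions(struct flow_action *actions,
			 struct netlink_ext_ack *extack)
{
	if (!flow_action_basic_hw_stats_types_check(actions, extack))
		return -EOPNOTSUPP;	/* helper already filled extack */
	return 0;
}
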
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 6f2faf81c1ae..4b5c8fd76a51 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -219,7 +219,6 @@ static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 80feb20a2e53..c476f13d0eaf 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -686,10 +686,6 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
* always generate an interrupt either after MBDONE packets have been
* transmitted, or when the ring is empty.
*/
- if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
- ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low ||
- ec->use_adaptive_tx_coalesce)
- return -EOPNOTSUPP;
/* Program all TX queues with the same values, as there is no
* ethtool knob to do coalescing on a per-queue basis
@@ -879,7 +875,6 @@ static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
- strlcpy(info->version, "v2.0", sizeof(info->version));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -1114,6 +1109,9 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.begin = bcmgenet_begin,
.complete = bcmgenet_complete,
.get_strings = bcmgenet_get_strings,
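
supported_coalesce_params is the ethtool-core contract behind both hunks above: a driver declares which ETHTOOL_COALESCE_* fields its set_coalesce honours, and the core rejects everything else with -EOPNOTSUPP before the driver is called, which is why bcmgenet could drop its hand-rolled tx_coalesce_* checks. Declared once in the ops (the example_* handlers are hypothetical):

#include <linux/ethtool.h>

static const struct ethtool_ops example_ethtool_ops = {
	/* Core-enforced allow-list; requests touching other fields
	 * never reach example_set_coalesce(), so it needs no policing.
	 */
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_coalesce	= example_get_coalesce,
	.set_coalesce	= example_set_coalesce,
};
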
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 88466255bf66..ff98a82b7bc4 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -96,11 +96,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME "tg3"
+/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM 3
#define TG3_MIN_NUM 137
-#define DRV_MODULE_VERSION \
- __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "May 11, 2014"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -222,13 +220,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
-static char version[] =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
-
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
@@ -12317,7 +12311,6 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
struct tg3 *tp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
@@ -14160,6 +14153,11 @@ static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
}
static const struct ethtool_ops tg3_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_drvinfo = tg3_get_drvinfo,
.get_regs_len = tg3_get_regs_len,
.get_regs = tg3_get_regs,
@@ -17625,8 +17623,6 @@ static int tg3_init_one(struct pci_dev *pdev,
u64 dma_mask, persist_dma_mask;
netdev_features_t features = 0;
- printk_once(KERN_INFO "%s\n", version);
-
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index d6588502a050..cc80bbbefe87 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3842,9 +3842,6 @@ bnad_module_init(void)
{
int err;
- pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
- BNAD_VERSION);
-
bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
err = pci_register_driver(&bnad_pci_driver);
@@ -3869,6 +3866,5 @@ module_exit(bnad_module_exit);
MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
-MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 492a02d54f14..627a93ce38ab 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -64,8 +64,6 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "3.2.25.1"
-
#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
#define BNAD_INTX_TX_IB_BITMASK 0x1
@@ -253,7 +251,7 @@ struct bnad_rx_unmap_q {
int alloc_order;
u32 map_size;
enum bnad_rxbuf_type type;
- struct bnad_rx_unmap unmap[0] ____cacheline_aligned;
+ struct bnad_rx_unmap unmap[] ____cacheline_aligned;
};
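
unmap[0] was the pre-C99 zero-length-array trick; unmap[] is the standard flexible array member, which lets the compiler and fortify checks reason about the trailing storage. Such structs pair naturally with struct_size() at allocation time; a sketch on an illustrative struct, not the bna one:

#include <linux/overflow.h>
#include <linux/slab.h>

struct unmap_q {
	u32 map_size;
	struct page *pages[];	/* flexible array member, was pages[0] */
};

static struct unmap_q *alloc_unmap_q(unsigned int nr)
{
	struct unmap_q *q;

	/* header + nr trailing elements, with overflow checking */
	q = kzalloc(struct_size(q, pages, nr), GFP_KERNEL);
	return q;
}
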
#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index b764c9ff9ad1..588c4804d10a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -284,7 +284,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
unsigned long flags;
strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -1116,6 +1115,9 @@ out:
}
static const struct ethtool_ops bnad_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = bnad_get_drvinfo,
.get_wol = bnad_get_wol,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index b821c9e1604c..81ff9ac73f9a 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -13,6 +13,9 @@
#define DRV_NAME "cavium_ptp"
#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_88XX_PTP 0xA10C
+#define PCI_SUBSYS_DEVID_81XX_PTP 0xA20C
+#define PCI_SUBSYS_DEVID_83XX_PTP 0xA30C
#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
#define PCI_PTP_BAR_NO 0
@@ -321,7 +324,12 @@ static void cavium_ptp_remove(struct pci_dev *pdev)
}
static const struct pci_device_id cavium_ptp_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_88XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_81XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_83XX_PTP) },
{ 0, }
};
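
PCI_DEVICE_SUB() matches on the subsystem IDs as well, so the PTP driver now binds only to the three known board variants instead of any device exposing the base vendor/device pair. The generic shape of such a table (IDs below are placeholders):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_ptp_ids[] = {
	{ PCI_DEVICE_SUB(0x177d, 0xa00c,	/* vendor, device */
			 0x177d, 0xa10c) },	/* subvendor, subdevice */
	{ 0, }					/* sentinel */
};
MODULE_DEVICE_TABLE(pci, example_ptp_ids);
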
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index abe5d0dac851..16eebfc52109 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -442,7 +442,6 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
strcpy(drvinfo->driver, "liquidio");
- strcpy(drvinfo->version, LIQUIDIO_VERSION);
strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
ETHTOOL_FWVERS_LEN);
strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
@@ -459,7 +458,6 @@ lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
strcpy(drvinfo->driver, "liquidio_vf");
- strcpy(drvinfo->version, LIQUIDIO_VERSION);
strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
ETHTOOL_FWVERS_LEN);
strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
@@ -3099,7 +3097,17 @@ static int lio_set_fecparam(struct net_device *netdev,
return 0;
}
+#define LIO_ETHTOOL_COALESCE (ETHTOOL_COALESCE_RX_USECS | \
+ ETHTOOL_COALESCE_MAX_FRAMES | \
+ ETHTOOL_COALESCE_USE_ADAPTIVE | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_PKT_RATE_RX_USECS)
+
static const struct ethtool_ops lio_ethtool_ops = {
+ .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
.get_link_ksettings = lio_get_link_ksettings,
.set_link_ksettings = lio_set_link_ksettings,
.get_fecparam = lio_get_fecparam,
@@ -3130,6 +3138,7 @@ static const struct ethtool_ops lio_ethtool_ops = {
};
static const struct ethtool_ops lio_vf_ethtool_ops = {
+ .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
.get_link_ksettings = lio_get_link_ksettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = lio_get_vf_drvinfo,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index eab05b5534ea..a8d9ec927627 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -39,7 +39,6 @@
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
@@ -1414,13 +1413,6 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
dev_id);
}
- if (!ret)
- dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
- OCTEON_MAJOR_REV(oct),
- OCTEON_MINOR_REV(oct),
- octeon_get_conf(oct)->card_name,
- LIQUIDIO_VERSION);
-
return ret;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 7a77544a54f5..bbd9bfa4a989 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -32,7 +32,6 @@
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(LIQUIDIO_VERSION);
static int debug = -1;
module_param(debug, int, 0644);
@@ -2352,8 +2351,8 @@ static int octeon_device_init(struct octeon_device *oct)
}
atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
- dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
- LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);
+ dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
+ oct->sriov_info.rings_per_vf);
/* Setup the interrupt handler and record the INT SUM register address*/
if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index a5e0e9f17959..4da90757cd3f 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -25,17 +25,11 @@
#include "octeon_config.h"
-#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_BASE_MAJOR_VERSION 1
#define LIQUIDIO_BASE_MINOR_VERSION 7
#define LIQUIDIO_BASE_MICRO_VERSION 2
#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
__stringify(LIQUIDIO_BASE_MINOR_VERSION)
-#define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
-#define LIQUIDIO_VERSION LIQUIDIO_PACKAGE \
- __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
- __stringify(LIQUIDIO_BASE_MINOR_VERSION) \
- "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
struct lio_version {
u16 major;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index dfc77507b159..0d2831d10f65 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -127,7 +127,7 @@ struct octeon_pci_console_desc {
u32 pad;
/* must be 64 bit aligned here... */
/* Array of addresses of octeon_pci_console structures */
- u64 console_addr_array[0];
+ u64 console_addr_array[];
/* Implicit storage for console_addr_array */
};
@@ -840,17 +840,11 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
return -EINVAL;
}
- if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
- LIQUIDIO_PACKAGE, h->version);
- return -EINVAL;
- }
-
- if (memcmp(LIQUIDIO_BASE_VERSION, h->version + strlen(LIQUIDIO_PACKAGE),
+ if (memcmp(LIQUIDIO_BASE_VERSION, h->version,
strlen(LIQUIDIO_BASE_VERSION))) {
dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
LIQUIDIO_BASE_VERSION,
- h->version + strlen(LIQUIDIO_PACKAGE));
+ h->version);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index e9575887a4f8..9d868403d86c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -28,7 +28,6 @@
#include <asm/octeon/cvmx-agl-defs.h>
#define DRV_NAME "octeon_mgmt"
-#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
"Cavium Networks Octeon MII (management) port Network Driver"
@@ -1340,9 +1339,6 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static int octeon_mgmt_nway_reset(struct net_device *dev)
@@ -1517,7 +1513,6 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
if (result)
goto err;
- dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
return 0;
err:
@@ -1574,4 +1569,3 @@ module_exit(octeon_mgmt_mod_exit);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 5e0b16bb95a0..83dabcffc789 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -16,7 +16,6 @@
#include "../common/cavium_ptp.h"
#define DRV_NAME "nicvf"
-#define DRV_VERSION "1.0"
struct nicvf_stat {
char name[ETH_GSTRING_LEN];
@@ -192,7 +191,6 @@ static void nicvf_get_drvinfo(struct net_device *netdev,
struct nicvf *nic = netdev_priv(netdev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 016957285f99..b4b33368698f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -126,8 +126,7 @@ static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
- int timeout = NIC_MBOX_MSG_TIMEOUT;
- int sleep = 10;
+ unsigned long timeout;
int ret = 0;
mutex_lock(&nic->rx_mode_mtx);
@@ -137,6 +136,7 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
nicvf_write_to_mbx(nic, mbx);
+ timeout = jiffies + msecs_to_jiffies(NIC_MBOX_MSG_TIMEOUT);
/* Wait for previous message to be acked, timeout 2sec */
while (!nic->pf_acked) {
if (nic->pf_nacked) {
@@ -146,11 +146,10 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
ret = -EINVAL;
break;
}
- msleep(sleep);
+ usleep_range(8000, 10000);
if (nic->pf_acked)
break;
- timeout -= sleep;
- if (!timeout) {
+ if (time_after(jiffies, timeout)) {
netdev_err(nic->netdev,
"PF didn't ACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
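
The conversion above swaps a decrement-per-sleep millisecond budget for a jiffies deadline; the former undercounts whenever a sleep overshoots its nominal length, while time_after() measures actual elapsed time. The pattern in isolation (wait_for_ack() and TIMEOUT_MS are hypothetical):

#include <linux/jiffies.h>
#include <linux/delay.h>

static int wait_for_ack(bool *acked)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(TIMEOUT_MS);

	while (!READ_ONCE(*acked)) {
		usleep_range(8000, 10000);	/* ~10 ms; may overshoot */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* bound by wall clock */
	}
	return 0;
}
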
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index bc2427c49b89..2460451fc48f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -100,8 +100,8 @@
* RED accepts pkt if unused CQE < 2304 & >= 2560
* DROPs pkts if unused CQE < 2304
*/
-#define RQ_PASS_CQ_LVL 192ULL
-#define RQ_DROP_CQ_LVL 184ULL
+#define RQ_PASS_CQ_LVL 224ULL
+#define RQ_DROP_CQ_LVL 216ULL
/* RED and Backpressure levels of RBDR for pkt reception
* For RBDR, level is a measure of fullness i.e 0x0 means empty
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 94b9482f14a5..6475060649e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -55,7 +55,6 @@
#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
#define DRV_NAME "cxgb"
-#define DRV_VERSION "2.2"
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 0ccdde366ae1..99736796e1a0 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -429,7 +429,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct adapter *adapter = dev->ml_priv;
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -794,6 +793,9 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
}
static const struct ethtool_ops t1_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -984,8 +986,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adapter *adapter = NULL;
struct port_info *pi;
- pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);
-
err = pci_enable_device(pdev);
if (err)
return err;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 883cfa9c4b6d..42c6e9379882 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -105,7 +105,6 @@ static const struct pci_device_id cxgb3_pci_tbl[] = {
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
static int dflt_msg_enable = DFLT_MSG_ENABLE;
@@ -1629,7 +1628,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
spin_unlock(&adapter->stats_lock);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
if (fw_vers)
@@ -2106,6 +2104,7 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops cxgb_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -3210,8 +3209,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adapter *adapter = NULL;
struct port_info *pi;
- pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
-
if (!cxgb3_wq) {
cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
if (!cxgb3_wq) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/version.h b/drivers/net/ethernet/chelsio/cxgb3/version.h
index 165bfb91487a..b4b2547efc86 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/version.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/version.h
@@ -34,8 +34,6 @@
#define __CHELSIO_VERSION_H
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
-/* Driver version */
-#define DRV_VERSION "1.1.5-ko"
/* Firmware version */
#define FW_VERSION_MAJOR 7
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 8b7d156f79d3..e46a14f44a6f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -640,6 +640,7 @@ enum { /* adapter flags */
enum {
ULP_CRYPTO_LOOKASIDE = 1 << 0,
ULP_CRYPTO_IPSEC_INLINE = 1 << 1,
+ ULP_CRYPTO_KTLS_INLINE = 1 << 3,
};
struct rx_sw_desc;
@@ -1485,9 +1486,8 @@ static inline unsigned int qtimer_val(const struct adapter *adap,
return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
-/* driver version & name used for ethtool_drvinfo */
+/* driver name used for ethtool_drvinfo */
extern char cxgb4_driver_name[];
-extern const char cxgb4_driver_version[];
void t4_os_portmod_changed(struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index fe883cb1a7af..ebed99f3d4cf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3409,6 +3409,41 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.tls_pdu_rx));
seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
atomic_read(&adap->chcr_stats.tls_key));
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
+ seq_printf(seq, "Tx HW offload contexts added: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_ctx));
+ seq_printf(seq, "Tx connection created: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_connection_open));
+ seq_printf(seq, "Tx connection failed: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_connection_fail));
+ seq_printf(seq, "Tx connection closed: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_connection_close));
+ seq_printf(seq, "Packets passed for encryption : %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_packets));
+ seq_printf(seq, "Bytes passed for encryption : %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_bytes));
+ seq_printf(seq, "Tx records send: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_send_records));
+ seq_printf(seq, "Tx partial start of records: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_start_pkts));
+ seq_printf(seq, "Tx partial middle of records: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_middle_pkts));
+ seq_printf(seq, "Tx partial end of record: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_end_pkts));
+ seq_printf(seq, "Tx complete records: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_complete_pkts));
+ seq_printf(seq, "TX trim pkts : %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_trimmed_pkts));
+ seq_printf(seq, "Tx out of order packets: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_ooo));
+ seq_printf(seq, "Tx drop pkts before HW offload: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_skip_no_sync_data));
+ seq_printf(seq, "Tx drop not synced packets: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_drop_no_sync_data));
+ seq_printf(seq, "Tx drop bypass req: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_drop_bypass_req));
+#endif
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index c837382ee522..398ade42476c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -170,15 +170,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
u32 exprom_vers;
strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->version, cxgb4_driver_version,
- sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
info->regdump_len = get_regs_len(dev);
- if (!adapter->params.fw_vers)
- strcpy(info->fw_version, "N/A");
- else
+ if (adapter->params.fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
@@ -1580,6 +1576,10 @@ static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
}
static const struct ethtool_ops cxgb_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
.get_fecparam = get_fecparam,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 649842a8aa28..75fde0d4d493 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -90,11 +90,6 @@
char cxgb4_driver_name[] = KBUILD_MODNAME;
-#ifdef DRV_VERSION
-#undef DRV_VERSION
-#endif
-#define DRV_VERSION "2.0.0-ko"
-const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
@@ -137,7 +132,6 @@ const char cxgb4_driver_version[] = DRV_VERSION;
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
@@ -3626,8 +3620,6 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
struct adapter *adapter = netdev2adap(dev);
strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->version, cxgb4_driver_version,
- sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -5381,12 +5373,11 @@ static inline bool is_x_10g_port(const struct link_config *lc)
static int cfg_queues(struct adapter *adap)
{
u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
+ u32 i, n10g = 0, qidx = 0, n1g = 0;
+ u32 ncpus = num_online_cpus();
u32 niqflint, neq, num_ulds;
struct sge *s = &adap->sge;
- u32 i, n10g = 0, qidx = 0;
-#ifndef CONFIG_CHELSIO_T4_DCB
- int q10g = 0;
-#endif
+ u32 q10g = 0, q1g;
/* Reduce memory usage in kdump environment, disable all offload. */
if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
@@ -5424,44 +5415,50 @@ static int cfg_queues(struct adapter *adap)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
+
+ /* We default to 1 queue per non-10G port and up to # of cores queues
+ * per 10G port.
+ */
+ if (n10g)
+ q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
+
+ n1g = adap->params.nports - n10g;
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
* own TX Queue in order to prevent Head-Of-Line Blocking.
*/
+ q1g = 8;
if (adap->params.nports * 8 > avail_eth_qsets) {
dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
avail_eth_qsets, adap->params.nports * 8);
return -ENOMEM;
}
- for_each_port(adap, i) {
- struct port_info *pi = adap2pinfo(adap, i);
+ if (adap->params.nports * ncpus < avail_eth_qsets)
+ q10g = max(8U, ncpus);
+ else
+ q10g = max(8U, q10g);
- pi->first_qset = qidx;
- pi->nqsets = is_kdump_kernel() ? 1 : 8;
- qidx += pi->nqsets;
- }
-#else /* !CONFIG_CHELSIO_T4_DCB */
- /* We default to 1 queue per non-10G port and up to # of cores queues
- * per 10G port.
- */
- if (n10g)
- q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
- if (q10g > netif_get_num_default_rss_queues())
- q10g = netif_get_num_default_rss_queues();
+ while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+ q10g--;
- if (is_kdump_kernel())
+#else /* !CONFIG_CHELSIO_T4_DCB */
+ q1g = 1;
+ q10g = min(q10g, ncpus);
+#endif /* !CONFIG_CHELSIO_T4_DCB */
+ if (is_kdump_kernel()) {
q10g = 1;
+ q1g = 1;
+ }
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
- pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
+ pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
qidx += pi->nqsets;
}
-#endif /* !CONFIG_CHELSIO_T4_DCB */
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
@@ -5473,7 +5470,7 @@ static int cfg_queues(struct adapter *adap)
* capped by the number of available cores.
*/
num_ulds = adap->num_uld + adap->num_ofld_uld;
- i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus());
+ i = min_t(u32, MAX_OFLD_QSETS, ncpus);
avail_uld_qsets = roundup(i, adap->params.nports);
if (avail_qsets < num_ulds * adap->params.nports) {
adap->params.offload = 0;
@@ -6081,8 +6078,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int i, err;
u32 whoami;
- printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
-
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err) {
/* Just info, some other driver may have claimed the device. */
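
The cfg_queues() rework boils down to arithmetic: every non-10G port gets q1g queue sets (8 under DCB, otherwise 1), the remainder is split evenly across 10G-capable ports, and the result is now capped by the online CPU count rather than the RSS default. The non-DCB split, isolated (a sketch; names are illustrative):

/* avail = ethernet qsets available, nports = total ports,
 * n10g = 10G-capable ports, ncpus = num_online_cpus()
 */
static unsigned int q10g_split(unsigned int avail, unsigned int nports,
			       unsigned int n10g, unsigned int ncpus)
{
	unsigned int q10g = 0;

	if (n10g)	/* reserve one qset per slow port, split the rest */
		q10g = (avail - (nports - n10g)) / n10g;

	return q10g < ncpus ? q10g : ncpus;	/* cap at online CPUs */
}
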
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index bb5513bdd293..cc46277e98de 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -544,7 +544,8 @@ static bool valid_pedit_action(struct net_device *dev,
}
int cxgb4_validate_flow_actions(struct net_device *dev,
- struct flow_action *actions)
+ struct flow_action *actions,
+ struct netlink_ext_ack *extack)
{
struct flow_action_entry *act;
bool act_redir = false;
@@ -552,6 +553,9 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
bool act_vlan = false;
int i;
+ if (!flow_action_basic_hw_stats_types_check(actions, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, actions) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
@@ -642,7 +646,7 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
struct filter_ctx ctx;
int fidx, ret;
- if (cxgb4_validate_flow_actions(dev, &rule->action))
+ if (cxgb4_validate_flow_actions(dev, &rule->action, extack))
return -EOPNOTSUPP;
if (cxgb4_validate_flow_match(dev, cls))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
index e132516e9868..0a30c96b81ff 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -112,7 +112,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
struct flow_action *actions,
struct ch_filter_specification *fs);
int cxgb4_validate_flow_actions(struct net_device *dev,
- struct flow_action *actions);
+ struct flow_action *actions,
+ struct netlink_ext_ack *extack);
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 1b7681a4eb32..d80dee4d316d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -286,7 +286,8 @@ int cxgb4_tc_matchall_replace(struct net_device *dev,
}
ret = cxgb4_validate_flow_actions(dev,
- &cls_matchall->rule->action);
+ &cls_matchall->rule->action,
+ extack);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index cce33d279094..e65b52375dd8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -662,6 +662,25 @@ static int uld_attach(struct adapter *adap, unsigned int uld)
return 0;
}
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
+ * @adap: adapter info
+ * @enable: 1 to enable / 0 to disable ktls settings.
+ */
+static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
+{
+ u32 params = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_TX_HW) |
+ FW_PARAMS_PARAM_Y_V(enable));
+ int ret = 0;
+
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &params, &params);
+ /* if fw returns failure, clear the ktls flag */
+ if (ret)
+ adap->params.crypto &= ~ULP_CRYPTO_KTLS_INLINE;
+}
+#endif
+
/* cxgb4_register_uld - register an upper-layer driver
* @type: the ULD type
* @p: the ULD methods
@@ -698,6 +717,12 @@ void cxgb4_register_uld(enum cxgb4_uld type,
}
if (adap->flags & CXGB4_FULL_INIT_DONE)
enable_rx_uld(adap, type);
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ /* send mbox to enable ktls related settings. */
+ if (type == CXGB4_ULD_CRYPTO &&
+ (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
+ cxgb4_set_ktls_feature(adap, 1);
+#endif
if (adap->uld[type].add)
goto free_irq;
ret = setup_sge_txq_uld(adap, type, p);
@@ -750,6 +775,13 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
continue;
cxgb4_shutdown_uld_adapter(adap, type);
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ /* send mbox to disable ktls related settings. */
+ if (type == CXGB4_ULD_CRYPTO &&
+ (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
+ cxgb4_set_ktls_feature(adap, 0);
+#endif
}
mutex_unlock(&uld_mutex);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index d9d27bc1ae67..03b9bdc812cc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -357,6 +357,26 @@ struct chcr_stats_debug {
atomic_t tls_pdu_tx;
atomic_t tls_pdu_rx;
atomic_t tls_key;
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ atomic64_t ktls_tx_connection_open;
+ atomic64_t ktls_tx_connection_fail;
+ atomic64_t ktls_tx_connection_close;
+ atomic64_t ktls_tx_send_records;
+ atomic64_t ktls_tx_end_pkts;
+ atomic64_t ktls_tx_start_pkts;
+ atomic64_t ktls_tx_middle_pkts;
+ atomic64_t ktls_tx_retransmit_pkts;
+ atomic64_t ktls_tx_complete_pkts;
+ atomic64_t ktls_tx_trimmed_pkts;
+ atomic64_t ktls_tx_encrypted_packets;
+ atomic64_t ktls_tx_encrypted_bytes;
+ atomic64_t ktls_tx_ctx;
+ atomic64_t ktls_tx_ooo;
+ atomic64_t ktls_tx_skip_no_sync_data;
+ atomic64_t ktls_tx_drop_no_sync_data;
+ atomic64_t ktls_tx_drop_bypass_req;
+
+#endif
};
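
chcr_stats_debug now mixes atomic_t with the new atomic64_t counters: the 64-bit variants are the right tool for hot-path event counts read back from debugfs, since they neither wrap at 4G nor tear on 32-bit readers. Minimal usage (counter name illustrative):

#include <linux/atomic.h>

static atomic64_t ktls_tx_records;	/* illustrative counter */

static inline void note_record_sent(void)
{
	atomic64_inc(&ktls_tx_records);	/* lockless, hot-path safe */
}

static inline s64 records_snapshot(void)
{
	return atomic64_read(&ktls_tx_records);	/* no torn 64-bit read */
}
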
#define OCQ_WIN_OFFSET(pdev, vres) \
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 12c3354172cd..72b37a66c7d8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -700,6 +700,17 @@ static char l2e_state(const struct l2t_entry *e)
}
}
+bool cxgb4_check_l2t_valid(struct l2t_entry *e)
+{
+ bool valid;
+
+ spin_lock(&e->lock);
+ valid = (e->state == L2T_STATE_VALID);
+ spin_unlock(&e->lock);
+ return valid;
+}
+EXPORT_SYMBOL(cxgb4_check_l2t_valid);
+
static int l2t_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 79665bd8f881..340fecb28a13 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -122,6 +122,7 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
u8 port, u8 *dmac);
struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
+bool cxgb4_check_l2t_valid(struct l2t_entry *e);
extern const struct file_operations t4_l2t_fops;
#endif /* __CXGB4_L2T_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 97cda501e7e8..a412b641e52c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1412,6 +1412,11 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (skb->decrypted)
+ return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+#endif /* CHELSIO_TLS_DEVICE */
+
qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
spin_lock(&adap->ptp_lock);
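
On the transmit path, skb->decrypted flags payload from a kTLS-offload socket: the bytes are still plaintext and must take the crypto ULD's tx_handler rather than the normal queue, exactly as the hunk above does. The dispatch shape (tls_tx_handler() and normal_xmit() are hypothetical):

static netdev_tx_t xmit_dispatch(struct sk_buff *skb, struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (skb->decrypted)			/* plaintext from a kTLS socket */
		return tls_tx_handler(skb, dev);
#endif
	return normal_xmit(skb, dev);
}
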
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 7d874f03d6c5..fed5f93bf620 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -47,6 +47,7 @@ enum {
CPL_CLOSE_LISTSRV_REQ = 0x9,
CPL_ABORT_REQ = 0xA,
CPL_ABORT_RPL = 0xB,
+ CPL_TX_DATA = 0xC,
CPL_RX_DATA_ACK = 0xD,
CPL_TX_PKT = 0xE,
CPL_L2T_WRITE_REQ = 0x12,
@@ -705,6 +706,14 @@ struct cpl_set_tcb_field {
__be64 val;
};
+struct cpl_set_tcb_field_core {
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 word_cookie;
+ __be64 mask;
+ __be64 val;
+};
+
/* cpl_set_tcb_field.word_cookie fields */
#define TCB_WORD_S 0
#define TCB_WORD_V(x) ((x) << TCB_WORD_S)
@@ -1462,6 +1471,16 @@ struct cpl_tx_data {
#define TX_FORCE_S 13
#define TX_FORCE_V(x) ((x) << TX_FORCE_S)
+#define TX_DATA_MSS_S 16
+#define TX_DATA_MSS_M 0xFFFF
+#define TX_DATA_MSS_V(x) ((x) << TX_DATA_MSS_S)
+#define TX_DATA_MSS_G(x) (((x) >> TX_DATA_MSS_S) & TX_DATA_MSS_M)
+
+#define TX_LENGTH_S 0
+#define TX_LENGTH_M 0xFFFF
+#define TX_LENGTH_V(x) ((x) << TX_LENGTH_S)
+#define TX_LENGTH_G(x) (((x) >> TX_LENGTH_S) & TX_LENGTH_M)
+
#define T6_TX_FORCE_S 20
#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S)
#define T6_TX_FORCE_F T6_TX_FORCE_V(1U)
@@ -1471,6 +1490,15 @@ struct cpl_tx_data {
#define TX_SHOVE_S 14
#define TX_SHOVE_V(x) ((x) << TX_SHOVE_S)
+#define TX_SHOVE_F TX_SHOVE_V(1U)
+
+#define TX_BYPASS_S 21
+#define TX_BYPASS_V(x) ((x) << TX_BYPASS_S)
+#define TX_BYPASS_F TX_BYPASS_V(1U)
+
+#define TX_PUSH_S 22
+#define TX_PUSH_V(x) ((x) << TX_PUSH_S)
+#define TX_PUSH_F TX_PUSH_V(1U)
#define TX_ULP_MODE_S 10
#define TX_ULP_MODE_M 0x7
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 1b9afb192f7f..50232e063f49 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -35,6 +35,11 @@
#ifndef __T4_TCB_H
#define __T4_TCB_H
+#define TCB_L2T_IX_W 0
+#define TCB_L2T_IX_S 12
+#define TCB_L2T_IX_M 0xfffULL
+#define TCB_L2T_IX_V(x) ((x) << TCB_L2T_IX_S)
+
#define TCB_SMAC_SEL_W 0
#define TCB_SMAC_SEL_S 24
#define TCB_SMAC_SEL_M 0xffULL
@@ -45,11 +50,6 @@
#define TCB_T_FLAGS_M 0xffffffffffffffffULL
#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
-#define TCB_RQ_START_W 30
-#define TCB_RQ_START_S 0
-#define TCB_RQ_START_M 0x3ffffffULL
-#define TCB_RQ_START_V(x) ((x) << TCB_RQ_START_S)
-
#define TF_CCTRL_ECE_S 60
#define TF_CCTRL_CWR_S 61
#define TF_CCTRL_RFR_S 62
@@ -59,6 +59,11 @@
#define TCB_RSS_INFO_M 0x3ffULL
#define TCB_RSS_INFO_V(x) ((x) << TCB_RSS_INFO_S)
+#define TCB_T_STATE_W 3
+#define TCB_T_STATE_S 16
+#define TCB_T_STATE_M 0xfULL
+#define TCB_T_STATE_V(x) ((x) << TCB_T_STATE_S)
+
#define TCB_TIMESTAMP_W 5
#define TCB_TIMESTAMP_S 0
#define TCB_TIMESTAMP_M 0xffffffffULL
@@ -69,13 +74,60 @@
#define TCB_RTT_TS_RECENT_AGE_M 0xffffffffULL
#define TCB_RTT_TS_RECENT_AGE_V(x) ((x) << TCB_RTT_TS_RECENT_AGE_S)
+#define TCB_T_RTSEQ_RECENT_W 7
+#define TCB_T_RTSEQ_RECENT_S 0
+#define TCB_T_RTSEQ_RECENT_M 0xffffffffULL
+#define TCB_T_RTSEQ_RECENT_V(x) ((x) << TCB_T_RTSEQ_RECENT_S)
+
+#define TCB_TX_MAX_W 9
+#define TCB_TX_MAX_S 0
+#define TCB_TX_MAX_M 0xffffffffULL
+#define TCB_TX_MAX_V(x) ((x) << TCB_TX_MAX_S)
+
#define TCB_SND_UNA_RAW_W 10
+#define TCB_SND_UNA_RAW_S 0
+#define TCB_SND_UNA_RAW_M 0xfffffffULL
+#define TCB_SND_UNA_RAW_V(x) ((x) << TCB_SND_UNA_RAW_S)
+
+#define TCB_SND_NXT_RAW_W 10
+#define TCB_SND_NXT_RAW_S 28
+#define TCB_SND_NXT_RAW_M 0xfffffffULL
+#define TCB_SND_NXT_RAW_V(x) ((x) << TCB_SND_NXT_RAW_S)
+
+#define TCB_SND_MAX_RAW_W 11
+#define TCB_SND_MAX_RAW_S 24
+#define TCB_SND_MAX_RAW_M 0xfffffffULL
+#define TCB_SND_MAX_RAW_V(x) ((x) << TCB_SND_MAX_RAW_S)
+
+#define TCB_RCV_NXT_W 16
+#define TCB_RCV_NXT_S 10
+#define TCB_RCV_NXT_M 0xffffffffULL
+#define TCB_RCV_NXT_V(x) ((x) << TCB_RCV_NXT_S)
+
+#define TCB_RCV_WND_W 17
+#define TCB_RCV_WND_S 10
+#define TCB_RCV_WND_M 0xffffffULL
+#define TCB_RCV_WND_V(x) ((x) << TCB_RCV_WND_S)
+
#define TCB_RX_FRAG2_PTR_RAW_W 27
#define TCB_RX_FRAG3_LEN_RAW_W 29
#define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W 30
#define TCB_PDU_HDR_LEN_W 31
+#define TCB_RQ_START_W 30
+#define TCB_RQ_START_S 0
+#define TCB_RQ_START_M 0x3ffffffULL
+#define TCB_RQ_START_V(x) ((x) << TCB_RQ_START_S)
+
#define TF_RX_PDU_OUT_S 49
#define TF_RX_PDU_OUT_V(x) ((__u64)(x) << TF_RX_PDU_OUT_S)
+#define TF_CORE_BYPASS_S 63
+#define TF_CORE_BYPASS_V(x) ((__u64)(x) << TF_CORE_BYPASS_S)
+#define TF_CORE_BYPASS_F TF_CORE_BYPASS_V(1)
+
+#define TF_NON_OFFLOAD_S 1
+#define TF_NON_OFFLOAD_V(x) ((x) << TF_NON_OFFLOAD_S)
+#define TF_NON_OFFLOAD_F TF_NON_OFFLOAD_V(1)
+
#endif /* __T4_TCB_H */
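Each TCB field added above is addressed by a word index (_W), a bit shift inside that word (_S), and a width mask (_M); a cpl_set_tcb_field request carries exactly that (word, mask, value) triple. A hedged sketch of assembling the triple for TCB_RCV_NXT (request construction and transmission are elided; rcv_nxt is an assumed local variable):

	u16 word = TCB_RCV_NXT_W;
	u64 mask = TCB_RCV_NXT_V(TCB_RCV_NXT_M);	/* all field bits, in place */
	u64 val  = TCB_RCV_NXT_V((u64)rcv_nxt);		/* new value, in place */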
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 703effc00a05..68fe734b9b37 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1205,6 +1205,7 @@ enum fw_caps_config_crypto {
FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001,
FW_CAPS_CONFIG_TLS_INLINE = 0x00000002,
FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004,
+ FW_CAPS_CONFIG_TX_TLS_HW = 0x00000008,
};
enum fw_caps_config_fcoe {
@@ -1328,6 +1329,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B,
FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
+ FW_PARAMS_PARAM_DEV_KTLS_TX_HW = 0x31,
};
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index f4d41f968afa..9cc3541a7e1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -55,7 +55,6 @@
/*
* Generic information about the driver.
*/
-#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
/*
@@ -1556,7 +1555,6 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
struct adapter *adapter = netdev2adap(dev);
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -1921,6 +1919,8 @@ static void cxgb4vf_get_wol(struct net_device *dev,
NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static const struct ethtool_ops cxgb4vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_link_ksettings = cxgb4vf_get_link_ksettings,
.get_fecparam = cxgb4vf_get_fecparam,
.get_drvinfo = cxgb4vf_get_drvinfo,
@@ -2934,12 +2934,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
unsigned int pf;
/*
- * Print our driver banner the first time we're called to initialize a
- * device.
- */
- pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
-
- /*
* Initialize generic PCI device state.
*/
err = pci_enable_device(pdev);
@@ -3454,7 +3448,6 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
static struct pci_driver cxgb4vf_driver = {
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 21034536c9c5..854d87e1125c 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -35,7 +35,6 @@
*/
#define DRV_NAME "libcxgb"
-#define DRV_VERSION "1.0.0-ko"
#define pr_fmt(fmt) DRV_NAME ": " fmt
#include <linux/kernel.h>
@@ -530,5 +529,4 @@ EXPORT_SYMBOL(cxgbi_tagmask_set);
MODULE_AUTHOR("Chelsio Communications");
MODULE_DESCRIPTION("Chelsio common library");
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index f37c9a08c4cf..9f5e5ec69991 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -24,7 +24,6 @@
#include <linux/platform_data/eth-ep93xx.h>
#define DRV_MODULE_NAME "ep93xx-eth"
-#define DRV_MODULE_VERSION "0.1"
#define RX_QUEUE_ENTRIES 64
#define TX_QUEUE_ENTRIES 8
@@ -691,7 +690,6 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int ep93xx_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 0dd64acd2a3f..18f3aeb88f22 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,8 +33,6 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.3.0.53"
-#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index ebd5c2cf1efe..4d8e0aa447fb 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -147,7 +147,6 @@ static void enic_get_drvinfo(struct net_device *netdev,
return;
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
@@ -324,25 +323,6 @@ static int enic_coalesce_valid(struct enic *enic,
u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
ec->rx_coalesce_usecs_low);
- if (ec->rx_max_coalesced_frames ||
- ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_max_coalesced_frames ||
- ec->tx_coalesce_usecs_irq ||
- ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -EINVAL;
-
if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
ec->tx_coalesce_usecs)
return -EINVAL;
@@ -636,6 +616,10 @@ static int enic_get_ts_info(struct net_device *netdev,
}
static const struct ethtool_ops enic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_RX_USECS_LOW |
+ ETHTOOL_COALESCE_RX_USECS_HIGH,
.get_drvinfo = enic_get_drvinfo,
.get_msglevel = enic_get_msglevel,
.set_msglevel = enic_set_msglevel,
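The block deleted from enic_coalesce_valid() is the pattern this series retires tree-wide: rather than each driver open-coding which struct ethtool_coalesce fields it rejects, the driver now declares a .supported_coalesce_params bitmap and the ethtool core refuses unsupported fields before ->set_coalesce() is ever reached. Roughly, as a simplified sketch (not the actual net/ethtool code; ec_fields_set() is a hypothetical helper returning a bitmap of the non-zero fields):

	static int core_check_coalesce(const struct ethtool_ops *ops,
				       const struct ethtool_coalesce *ec)
	{
		u32 wanted = ec_fields_set(ec);

		if (wanted & ~ops->supported_coalesce_params)
			return -EOPNOTSUPP;	/* an unsupported knob was set */
		return 0;
	}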
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 3fc858b2c87b..cd5fe4f6b54c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -80,7 +80,6 @@ static const struct pci_device_id enic_id_table[] = {
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
#define ENIC_LARGE_PKT_THRESHOLD 1000
@@ -3055,8 +3054,6 @@ static struct pci_driver enic_driver = {
static int __init enic_init_module(void)
{
- pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
-
return pci_register_driver(&enic_driver);
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index f30fa8e6ef80..5bff5c2be88b 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -44,7 +44,6 @@
#include "gemini.h"
#define DRV_NAME "gmac-gemini"
-#define DRV_VERSION "1.0"
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
static int debug = -1;
@@ -2204,7 +2203,6 @@ static void gmac_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
strcpy(info->bus_info, netdev->dev_id ? "1" : "0");
}
@@ -2224,6 +2222,8 @@ static const struct net_device_ops gmac_351x_ops = {
};
static const struct ethtool_ops gmac_351x_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_sset_count = gmac_get_sset_count,
.get_strings = gmac_get_strings,
.get_ethtool_stats = gmac_get_ethtool_stats,
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index e94ae9b94dbf..7f7705138262 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -42,7 +42,6 @@
#define DM9000_PHY 0x40 /* PHY address 0x01 */
#define CARDNAME "dm9000"
-#define DRV_VERSION "1.31"
/*
* Transmit timeout, default 5 seconds.
@@ -543,7 +542,6 @@ static void dm9000_get_drvinfo(struct net_device *dev,
struct board_info *dm = to_dm9000_board(dev);
strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 42b798a3fad4..592454f444ce 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -30,7 +30,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "de2104x"
-#define DRV_VERSION "0.7"
#define DRV_RELDATE "Mar 17, 2004"
#include <linux/module.h>
@@ -52,14 +51,9 @@
#include <linux/uaccess.h>
#include <asm/unaligned.h>
-/* These identify the driver base version and may not be removed. */
-static char version[] =
-"PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";
-
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
static int debug = -1;
module_param (debug, int, 0);
@@ -1603,7 +1597,6 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
struct de_private *de = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
}
@@ -1980,11 +1973,6 @@ static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
board_idx++;
-#ifndef MODULE
- if (board_idx == 0)
- pr_info("%s\n", version);
-#endif
-
/* allocate a new ethernet device structure, and fill in defaults */
dev = alloc_etherdev(sizeof(struct de_private));
if (!dev)
@@ -2196,9 +2184,6 @@ static struct pci_driver de_driver = {
static int __init de_init (void)
{
-#ifdef MODULE
- pr_info("%s\n", version);
-#endif
return pci_register_driver(&de_driver);
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 32d470d4122a..c1884fc9ad32 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -56,8 +56,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "dmfe"
-#define DRV_VERSION "1.36.4"
-#define DRV_RELDATE "2002-01-17"
#include <linux/module.h>
#include <linux/kernel.h>
@@ -280,10 +278,6 @@ enum dmfe_CR6_bits {
};
/* Global variable declaration ----------------------------- */
-static int printed_version;
-static const char version[] =
- "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
-
static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;
@@ -364,9 +358,6 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
DMFE_DBUG(0, "dmfe_init_one()", 0);
- if (!printed_version++)
- pr_info("%s\n", version);
-
/*
* SPARC on-board DM910x chips should be handled by the main
* tulip driver, except for early DM9100s.
@@ -1081,7 +1072,6 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
struct dmfe_board_info *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -2177,7 +2167,6 @@ static struct pci_driver dmfe_driver = {
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(debug, int, 0);
module_param(mode, byte, 0);
@@ -2204,9 +2193,6 @@ static int __init dmfe_init_module(void)
{
int rc;
- pr_info("%s\n", version);
- printed_version = 1;
-
DMFE_DBUG(0, "init_module() ", debug);
if (debug)
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index b458140aeaef..815907259048 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -381,7 +381,7 @@ struct mediatable {
unsigned has_reset:6;
u32 csr15dir;
u32 csr15val; /* 21143 NWay setting. */
- struct medialeaf mleaf[0];
+ struct medialeaf mleaf[];
};
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 9e9d9eee29d9..48ea658aa1a6 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -12,13 +12,6 @@
#define pr_fmt(fmt) "tulip: " fmt
#define DRV_NAME "tulip"
-#ifdef CONFIG_TULIP_NAPI
-#define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
-#else
-#define DRV_VERSION "1.1.15"
-#endif
-#define DRV_RELDATE "Feb 27, 2007"
-
#include <linux/module.h>
#include <linux/pci.h>
@@ -37,9 +30,6 @@
#include <asm/prom.h>
#endif
-static char version[] =
- "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
-
/* A few user-configurable values. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
@@ -109,7 +99,6 @@ static int csr0;
MODULE_AUTHOR("The Linux Kernel Team");
MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(tulip_debug, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
@@ -868,7 +857,6 @@ static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct tulip_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -1314,11 +1302,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned int eeprom_missing = 0;
unsigned int force_csr0 = 0;
-#ifndef MODULE
- if (tulip_debug > 0)
- printk_once(KERN_INFO "%s", version);
-#endif
-
board_idx++;
/*
@@ -1800,14 +1783,13 @@ static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
void __iomem *ioaddr = tp->base_addr;
if (tp->flags & COMET_PM) {
-
unsigned int tmp;
-
+
tmp = ioread32(ioaddr + CSR18);
tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
tmp |= comet_csr18_pm_mode;
iowrite32(tmp, ioaddr + CSR18);
-
+
/* Set the Wake-up Control/Status Register to the given WOL options */
tmp = ioread32(ioaddr + CSR13);
tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
@@ -1969,10 +1951,6 @@ static struct pci_driver tulip_driver = {
static int __init tulip_init (void)
{
-#ifdef MODULE
- pr_info("%s", version);
-#endif
-
if (!csr0) {
pr_warn("tulip: unknown CPU architecture, using default csr0\n");
/* default to 8 longword cache line alignment */
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 117ffe08800d..f726436b1985 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -7,8 +7,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "uli526x"
-#define DRV_VERSION "0.9.3"
-#define DRV_RELDATE "2005-7-29"
#include <linux/module.h>
@@ -196,10 +194,6 @@ enum uli526x_CR6_bits {
};
/* Global variable declaration ----------------------------- */
-static int printed_version;
-static const char version[] =
- "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
-
static int uli526x_debug;
static unsigned char uli526x_media_mode = ULI526X_AUTO;
static u32 uli526x_cr6_user_set;
@@ -282,9 +276,6 @@ static int uli526x_init_one(struct pci_dev *pdev,
ULI526X_DBUG(0, "uli526x_init_one()", 0);
- if (!printed_version++)
- pr_info("%s\n", version);
-
/* Init network device */
dev = alloc_etherdev(sizeof(*db));
if (dev == NULL)
@@ -972,7 +963,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct uli526x_board_info *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -1799,9 +1789,6 @@ MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8
static int __init uli526x_init_module(void)
{
- pr_info("%s\n", version);
- printed_version = 1;
-
ULI526X_DBUG(0, "init_module() ", debug);
if (debug)
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 7f136488e67c..4d5e4fa53023 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -47,9 +47,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "winbond-840"
-#define DRV_VERSION "1.01-e"
-#define DRV_RELDATE "Sep-11-2006"
-
/* Automatically extracted configuration info:
probe-func: winbond840_probe
@@ -139,16 +136,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#undef PKT_BUF_SZ /* tulip.h also defines this */
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
-/* These identify the driver base version and may not be removed. */
-static const char version[] __initconst =
- "v" DRV_VERSION " (2.4 port) "
- DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
- " http://www.scyld.com/network/drivers.html\n";
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
@@ -1385,7 +1375,6 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -1650,7 +1639,6 @@ static struct pci_driver w840_driver = {
static int __init w840_init(void)
{
- printk(version);
return pci_register_driver(&w840_driver);
}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 26c5da032b1e..643090555cc7 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -8,8 +8,6 @@
*/
#define DRV_NAME "DL2000/TC902x-based linux driver"
-#define DRV_VERSION "v1.19"
-#define DRV_RELDATE "2007/08/12"
#include "dl2k.h"
#include <linux/dma-mapping.h>
@@ -20,8 +18,6 @@
#define dr16(reg) ioread16(ioaddr + (reg))
#define dr8(reg) ioread8(ioaddr + (reg))
-static char version[] =
- KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
@@ -113,13 +109,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
int err, irq;
void __iomem *ioaddr;
- static int version_printed;
void *ring_space;
dma_addr_t ring_dma;
- if (!version_printed++)
- printk ("%s", version);
-
err = pci_enable_device (pdev);
if (err)
return err;
@@ -1244,7 +1236,6 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, "dl2k", sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index b91387c456ba..dc566fcc3ba9 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -23,9 +23,6 @@
*/
#define DRV_NAME "sundance"
-#define DRV_VERSION "1.2"
-#define DRV_RELDATE "11-Sep-2006"
-
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
@@ -101,11 +98,6 @@ static char *media[MAX_UNITS];
#include <linux/ethtool.h>
#include <linux/mii.h>
-/* These identify the driver base version and may not be removed. */
-static const char version[] =
- KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
- " Written by Donald Becker\n";
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");
@@ -516,13 +508,6 @@ static int sundance_probe1(struct pci_dev *pdev,
#endif
int phy, phy_end, phy_idx = 0;
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
if (pci_enable_device(pdev))
return -EIO;
pci_set_master(pdev);
@@ -1657,7 +1642,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -2010,10 +1994,6 @@ static struct pci_driver sundance_driver = {
static int __init sundance_init(void)
{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- printk(version);
-#endif
return pci_register_driver(&sundance_driver);
}
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 5f8fa1145db6..057a508dd6e2 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -729,7 +729,6 @@ static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, "0", sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h
index 8af6c0705ab3..030724484b49 100644
--- a/drivers/net/ethernet/dnet.h
+++ b/drivers/net/ethernet/dnet.h
@@ -8,7 +8,6 @@
#define _DNET_H
#define DRV_NAME "dnet"
-#define DRV_VERSION "0.9.1"
#define PFX DRV_NAME ": "
/* Register access macros */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cf3e6f2892ff..6e9022083004 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -33,7 +33,6 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "12.0.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 022a54a1805b..d6ed1d943762 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -221,7 +221,6 @@ static void be_get_drvinfo(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
strlcpy(drvinfo->fw_version, adapter->fw_ver,
sizeof(drvinfo->fw_version));
@@ -1409,6 +1408,9 @@ static int be_set_priv_flags(struct net_device *netdev, u32 flags)
}
const struct ethtool_ops be_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USECS_LOW_HIGH,
.get_drvinfo = be_get_drvinfo,
.get_wol = be_get_wol,
.set_wol = be_set_wol,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 56f59db6ebf2..a7ac23a6862b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -21,8 +21,7 @@
#include <net/busy_poll.h>
#include <net/vxlan.h>
-MODULE_VERSION(DRV_VER);
-MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
+MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
@@ -5949,8 +5948,6 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
struct net_device *netdev;
int status = 0;
- dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
-
status = pci_enable_device(pdev);
if (status)
goto do_none;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 4572797f00d7..835b7816e372 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -30,7 +30,6 @@
#include "ftgmac100.h"
#define DRV_NAME "ftgmac100"
-#define DRV_VERSION "0.7"
/* Arbitrary values, I am not sure the HW has limits */
#define MAX_RX_QUEUE_ENTRIES 1024
@@ -1150,7 +1149,6 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
@@ -1757,9 +1755,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
struct device_node *np;
int err = 0;
- if (!pdev)
- return -ENODEV;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 6c247cbbd23e..32cf54f0e35b 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -23,7 +23,6 @@
#include "ftmac100.h"
#define DRV_NAME "ftmac100"
-#define DRV_VERSION "0.2"
#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
@@ -809,7 +808,6 @@ static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
@@ -1184,7 +1182,6 @@ static struct platform_driver ftmac100_driver = {
*****************************************************************************/
static int __init ftmac100_init(void)
{
- pr_info("Loading version " DRV_VERSION " ...\n");
return platform_driver_register(&ftmac100_driver);
}
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 84f10970299a..73e896a7d8fd 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -25,8 +25,6 @@
*/
#define DRV_NAME "fealnx"
-#define DRV_VERSION "2.52"
-#define DRV_RELDATE "Sep-11-2006"
static int debug; /* 1-> print debug message */
static int max_interrupt_work = 20;
@@ -91,11 +89,6 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
#include <linux/uaccess.h>
#include <asm/byteorder.h>
-/* These identify the driver base version and may not be removed. */
-static const char version[] =
- KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
-
-
/* This driver was written to use PCI memory space, however some x86 systems
work only with I/O space accesses. */
#ifndef __alpha__
@@ -495,13 +488,6 @@ static int fealnx_init_one(struct pci_dev *pdev,
int bar = 1;
#endif
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
card_idx++;
sprintf(boardname, "fealnx%d", card_idx);
@@ -1809,7 +1795,6 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -1950,11 +1935,6 @@ static struct pci_driver fealnx_driver = {
static int __init fealnx_init(void)
{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- printk(version);
-#endif
-
return pci_register_driver(&fealnx_driver);
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index fd93d542f497..46039d80bb43 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1,4 +1,5 @@
/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -123,7 +124,22 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define FSL_QMAN_MAX_OAL 127
/* Default alignment for start of data in an Rx FD */
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+/* aligning data start to 64 avoids DMA transaction splits, unless the buffer
+ * crosses a 4k page boundary
+ */
+#define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16)
+/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
+ * crossings; also, all SG fragments except the last must have a size that
+ * is a multiple of 256 to avoid DMA transaction splits
+ */
+#define DPAA_A050385_ALIGN 256
+#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
+ DPAA_A050385_ALIGN : 16)
+#else
#define DPAA_FD_DATA_ALIGNMENT 16
+#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
+#endif
/* The DPAA requires 256 bytes reserved and mapped for the SGT */
#define DPAA_SGT_SIZE 256
@@ -158,8 +174,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\
+ + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE))
+#else
#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
dpaa_rx_extra_headroom)
+#endif
#define DPAA_ETH_PCD_RXQ_NUM 128
@@ -180,7 +201,12 @@ static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
#define DPAA_BP_RAW_SIZE 4096
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
+ ~(DPAA_A050385_ALIGN - 1))
+#else
#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
+#endif
static int dpaa_max_frm;
@@ -233,8 +259,20 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->features |= net_dev->hw_features;
net_dev->vlan_features = net_dev->features;
- memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ if (is_valid_ether_addr(mac_addr)) {
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ } else {
+ eth_hw_addr_random(net_dev);
+ err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
+ (enet_addr_t *)net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "Failed to set random MAC address\n");
+ return -EINVAL;
+ }
+ dev_info(dev, "Using random MAC address: %pM\n",
+ net_dev->dev_addr);
+ }
net_dev->ethtool_ops = &dpaa_ethtool_ops;
@@ -1192,7 +1230,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
buf_prefix_content.pass_prs_result = true;
buf_prefix_content.pass_hash_result = true;
buf_prefix_content.pass_time_stamp = true;
- buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+ buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
rx_p = &params.specific_params.rx_params;
rx_p->err_fqid = errq->fqid;
@@ -1662,6 +1700,8 @@ static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
return CHECKSUM_NONE;
}
+#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))
+
/* Build a linear skb around the received buffer.
* We are guaranteed there is enough room at the end of the data buffer to
* accommodate the shared info area of the skb.
@@ -1733,8 +1773,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
sg_addr = qm_sg_addr(&sgt[i]);
sg_vaddr = phys_to_virt(sg_addr);
- WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
- SMP_CACHE_BYTES));
+ WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
dma_unmap_page(priv->rx_dma_dev, sg_addr,
DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
@@ -2022,6 +2061,75 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
return 0;
}
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct sk_buff *new_skb, *skb = *s;
+ unsigned char *start, i;
+
+ /* check linear buffer alignment */
+ if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
+ goto workaround;
+
+ /* linear buffers just need to have an aligned start */
+ if (!skb_is_nonlinear(skb))
+ return 0;
+
+ /* linear data size for nonlinear skbs needs to be aligned */
+ if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
+ goto workaround;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ /* all fragments need to have aligned start addresses */
+ if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
+ goto workaround;
+
+ /* all but last fragment need to have aligned sizes */
+ if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
+ (i < skb_shinfo(skb)->nr_frags - 1))
+ goto workaround;
+ }
+
+ return 0;
+
+workaround:
+ /* copy all the skb content into a new linear buffer */
+ new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
+ priv->tx_headroom);
+ if (!new_skb)
+ return -ENOMEM;
+
+ /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
+ skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
+
+ /* Workaround for DPAA_A050385 requires data start to be aligned */
+ start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
+ if (start - new_skb->data != 0)
+ skb_reserve(new_skb, start - new_skb->data);
+
+ skb_put(new_skb, skb->len);
+ skb_copy_bits(skb, 0, new_skb->data, skb->len);
+ skb_copy_header(new_skb, skb);
+ new_skb->dev = skb->dev;
+
+ /* We move the headroom when we align it so we have to reset the
+ * network and transport header offsets relative to the new data
+ * pointer. The checksum offload relies on these offsets.
+ */
+ skb_set_network_header(new_skb, skb_network_offset(skb));
+ skb_set_transport_header(new_skb, skb_transport_offset(skb));
+
+ /* TODO: does timestamping need the result in the old skb? */
+ dev_kfree_skb(skb);
+ *s = new_skb;
+
+ return 0;
+}
+#endif
+
static netdev_tx_t
dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
@@ -2068,6 +2176,14 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
nonlinear = skb_is_nonlinear(skb);
}
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ if (unlikely(fman_has_errata_a050385())) {
+ if (dpaa_a050385_wa(net_dev, &skb))
+ goto enomem;
+ nonlinear = skb_is_nonlinear(skb);
+ }
+#endif
+
if (nonlinear) {
/* Just create a S/G fd based on the skb */
err = skb_to_sg_fd(priv, skb, &fd);
@@ -2741,9 +2857,7 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
- return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
- DPAA_FD_DATA_ALIGNMENT) :
- headroom;
+ return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
}
static int dpaa_eth_probe(struct platform_device *pdev)
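Restated outside the diff context, dpaa_a050385_wa() only falls back to the linearizing copy when the frame violates one of the alignment rules; the predicate below mirrors its checks one-to-one (an illustrative restatement, not a drop-in replacement):

	static bool dpaa_frame_is_aligned(const struct sk_buff *skb)
	{
		int i;

		/* the data start must sit on a 256-byte boundary */
		if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
			return false;
		/* for a linear skb, an aligned start is all that is needed */
		if (!skb_is_nonlinear(skb))
			return true;
		/* the linear part of a nonlinear skb needs an aligned length */
		if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
			return false;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			/* every fragment must start aligned ... */
			if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
				return false;
			/* ... and all but the last need an aligned size */
			if (i < skb_shinfo(skb)->nr_frags - 1 &&
			    !IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN))
				return false;
		}
		return true;
	}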
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 66d150872d48..9db2a02fb531 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -106,19 +106,8 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
static void dpaa_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
- int len;
-
strlcpy(drvinfo->driver, KBUILD_MODNAME,
sizeof(drvinfo->driver));
- len = snprintf(drvinfo->version, sizeof(drvinfo->version),
- "%X", 0);
- len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%X", 0);
-
- if (len >= sizeof(drvinfo->fw_version)) {
- /* Truncated output */
- netdev_notice(net_dev, "snprintf() = %d\n", len);
- }
strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
@@ -536,7 +525,6 @@ static int dpaa_get_coalesce(struct net_device *dev,
c->rx_coalesce_usecs = period;
c->rx_max_coalesced_frames = thresh;
- c->use_adaptive_rx_coalesce = false;
return 0;
}
@@ -551,9 +539,6 @@ static int dpaa_set_coalesce(struct net_device *dev,
u8 thresh, prev_thresh;
int cpu, res;
- if (c->use_adaptive_rx_coalesce)
- return -EINVAL;
-
period = c->rx_coalesce_usecs;
thresh = c->rx_max_coalesced_frames;
@@ -593,6 +578,8 @@ revert_values:
}
const struct ethtool_ops dpaa_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
.set_msglevel = dpaa_set_msglevel,
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index f86283411f4d..2b43848e1363 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -42,16 +42,6 @@ config FSL_ENETC_PTP_CLOCK
If compiled as module (M), the module name is fsl-enetc-ptp.
-config FSL_ENETC_HW_TIMESTAMPING
- bool "ENETC hardware timestamping support"
- depends on FSL_ENETC || FSL_ENETC_VF
- help
- Enable hardware timestamping support on the Ethernet packets
- using the SO_TIMESTAMPING API. Because the RX BD ring dynamic
- allocation has not been supported and it is too expensive to use
- extended RX BDs if timestamping is not used, this option enables
- extended RX BDs in order to support hardware timestamping.
-
config FSL_ENETC_QOS
bool "ENETC hardware Time-sensitive Network support"
depends on (FSL_ENETC || FSL_ENETC_VF) && (NET_SCH_TAPRIO || NET_SCH_CBS)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 1f79e36116a3..ccf2611f4a20 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -451,7 +451,7 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
i = rx_ring->next_to_use;
rx_swbd = &rx_ring->rx_swbd[i];
- rxbd = ENETC_RXBD(*rx_ring, i);
+ rxbd = enetc_rxbd(rx_ring, i);
for (j = 0; j < buff_cnt; j++) {
/* try reuse page */
@@ -468,13 +468,12 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
/* clear 'R' as well */
rxbd->r.lstatus = 0;
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
rx_swbd++;
- rxbd++;
i++;
if (unlikely(i == rx_ring->bd_count)) {
i = 0;
rx_swbd = rx_ring->rx_swbd;
- rxbd = ENETC_RXBD(*rx_ring, 0);
}
}
@@ -488,7 +487,7 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
return j;
}
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
union enetc_rx_bd *rxbd,
struct sk_buff *skb)
@@ -502,7 +501,8 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
lo = enetc_rd(hw, ENETC_SICTR0);
hi = enetc_rd(hw, ENETC_SICTR1);
- tstamp_lo = le32_to_cpu(rxbd->r.tstamp);
+ rxbd = enetc_rxbd_ext(rxbd);
+ tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
if (lo <= tstamp_lo)
hi -= 1;
@@ -516,7 +516,7 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
#endif
/* TODO: hashing */
@@ -533,7 +533,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxbd->r.vlan_opt));
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (priv->active_offloads & ENETC_F_RX_TSTAMP)
enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
#endif
@@ -655,7 +655,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
cleaned_cnt -= count;
}
- rxbd = ENETC_RXBD(*rx_ring, i);
+ rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
if (!bd_status)
break;
@@ -670,12 +670,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_get_offloads(rx_ring, rxbd, skb);
cleaned_cnt++;
- rxbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
+
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
+ if (unlikely(++i == rx_ring->bd_count))
i = 0;
- rxbd = ENETC_RXBD(*rx_ring, 0);
- }
if (unlikely(bd_status &
ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
@@ -683,12 +681,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
dma_rmb();
bd_status = le32_to_cpu(rxbd->r.lstatus);
- rxbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
+
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
+ if (unlikely(++i == rx_ring->bd_count))
i = 0;
- rxbd = ENETC_RXBD(*rx_ring, 0);
- }
}
rx_ring->ndev->stats.rx_dropped++;
@@ -710,12 +706,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
cleaned_cnt++;
- rxbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
+
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
+ if (unlikely(++i == rx_ring->bd_count))
i = 0;
- rxbd = ENETC_RXBD(*rx_ring, 0);
- }
}
rx_byte_cnt += skb->len;
@@ -845,15 +839,19 @@ static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
enetc_free_txbdr(priv->tx_ring[i]);
}
-static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
+static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
{
+ size_t size = sizeof(union enetc_rx_bd);
int err;
rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
if (!rxr->rx_swbd)
return -ENOMEM;
- err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
+ if (extended)
+ size *= 2;
+
+ err = enetc_dma_alloc_bdr(rxr, size);
if (err) {
vfree(rxr->rx_swbd);
return err;
@@ -862,6 +860,7 @@ static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
rxr->next_to_clean = 0;
rxr->next_to_use = 0;
rxr->next_to_alloc = 0;
+ rxr->ext_en = extended;
return 0;
}
@@ -881,10 +880,11 @@ static void enetc_free_rxbdr(struct enetc_bdr *rxr)
static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
{
+ bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
int i, err;
for (i = 0; i < priv->num_rx_rings; i++) {
- err = enetc_alloc_rxbdr(priv->rx_ring[i]);
+ err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
if (err)
goto fail;
@@ -1174,9 +1174,10 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
rbmr = ENETC_RBMR_EN;
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
- rbmr |= ENETC_RBMR_BDS;
-#endif
+
+ if (rx_ring->ext_en)
+ rbmr |= ENETC_RBMR_BDS;
+
if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
rbmr |= ENETC_RBMR_VTE;
@@ -1577,11 +1578,12 @@ int enetc_set_features(struct net_device *ndev,
return 0;
}
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct hwtstamp_config config;
+ int ao;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
@@ -1597,6 +1599,7 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
return -ERANGE;
}
+ ao = priv->active_offloads;
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
@@ -1606,6 +1609,11 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_ALL;
}
+ if (netif_running(ndev) && ao != priv->active_offloads) {
+ enetc_close(ndev);
+ enetc_open(ndev);
+ }
+
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -1632,7 +1640,7 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (cmd == SIOCSHWTSTAMP)
return enetc_hwtstamp_set(ndev, rq);
if (cmd == SIOCGHWTSTAMP)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 9938f7a5fc0a..56c43f35b633 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -73,6 +73,7 @@ struct enetc_bdr {
dma_addr_t bd_dma_base;
u8 tsd_enable; /* Time specific departure */
+ bool ext_en; /* enable h/w descriptor extensions */
} ____cacheline_aligned_in_smp;
static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
@@ -104,7 +105,37 @@ struct enetc_cbdr {
};
#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
-#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i]))
+
+static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
+{
+ int hw_idx = i;
+
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (rx_ring->ext_en)
+ hw_idx = 2 * i;
+#endif
+ return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
+}
+
+static inline union enetc_rx_bd *enetc_rxbd_next(struct enetc_bdr *rx_ring,
+ union enetc_rx_bd *rxbd,
+ int i)
+{
+ rxbd++;
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (rx_ring->ext_en)
+ rxbd++;
+#endif
+ if (unlikely(++i == rx_ring->bd_count))
+ rxbd = rx_ring->bd_base;
+
+ return rxbd;
+}
+
+static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
+{
+ return ++rxbd;
+}
struct enetc_msg_swbd {
void *vaddr;
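With extended descriptors enabled, the hardware consumes two 16-byte RX BDs per frame, so logical ring index i lands on hardware descriptor 2 * i; enetc_rxbd() and enetc_rxbd_next() hide that doubling, and enetc_rxbd_ext() steps to the second, timestamp-bearing half. A short traversal sketch, assuming the ring was allocated with extended BDs:

	static void walk_rx_ring(struct enetc_bdr *rx_ring)
	{
		union enetc_rx_bd *rxbd = enetc_rxbd(rx_ring, 0);
		int i;

		for (i = 0; i < rx_ring->bd_count; i++) {
			/* ... inspect rxbd->r (lstatus, buf_len, flags) ... */
			if (rx_ring->ext_en)
				pr_debug("tstamp lo: %u\n",
					 le32_to_cpu(enetc_rxbd_ext(rxbd)->ext.tstamp));
			rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
		}
	}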
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 301ee0dde02d..34bd1f3fb415 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -195,15 +195,21 @@ static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
static int enetc_get_sset_count(struct net_device *ndev, int sset)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int len;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
- if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(enetc_si_counters) +
- ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
- ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings +
- (enetc_si_is_pf(priv->si) ?
- ARRAY_SIZE(enetc_port_counters) : 0);
+ len = ARRAY_SIZE(enetc_si_counters) +
+ ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
+ ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings;
- return -EOPNOTSUPP;
+ if (!enetc_si_is_pf(priv->si))
+ return len;
+
+ len += ARRAY_SIZE(enetc_port_counters);
+
+ return len;
}
static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -568,7 +574,7 @@ static int enetc_get_ts_info(struct net_device *ndev,
info->phc_index = -1;
}
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index da134e211c1a..2a6523136947 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -418,9 +418,6 @@ union enetc_rx_bd {
struct {
__le64 addr;
u8 reserved[8];
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
- u8 reserved1[16];
-#endif
} w;
struct {
__le16 inet_csum;
@@ -435,11 +432,11 @@ union enetc_rx_bd {
};
__le32 lstatus;
};
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ } r;
+ struct {
__le32 tstamp;
u8 reserved[12];
-#endif
- } r;
+ } ext;
};
#define ENETC_RXBD_LSTATUS_R BIT(30)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index fc0d7d99e9a1..4e4a49179f0b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -7,12 +7,6 @@
#include <linux/of_net.h>
#include "enetc_pf.h"
-#define ENETC_DRV_VER_MAJ 1
-#define ENETC_DRV_VER_MIN 0
-
-#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
- __stringify(ENETC_DRV_VER_MIN)
-static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
#define ENETC_DRV_NAME_STR "ENETC PF driver"
static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
@@ -803,11 +797,6 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
struct device_node *mdio_np;
int err;
- if (!np) {
- dev_err(priv->dev, "missing ENETC port node\n");
- return -ENODEV;
- }
-
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!priv->phy_node) {
if (!of_phy_is_fixed_link(np)) {
@@ -929,9 +918,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
netif_carrier_off(ndev);
- netif_info(priv, probe, ndev, "%s v%s\n",
- enetc_drv_name, enetc_drv_ver);
-
return 0;
err_reg_netdev:
@@ -959,9 +945,6 @@ static void enetc_pf_remove(struct pci_dev *pdev)
enetc_sriov_configure(pdev, 0);
priv = netdev_priv(si->ndev);
- netif_info(priv, drv, si->ndev, "%s v%s remove\n",
- enetc_drv_name, enetc_drv_ver);
-
unregister_netdev(si->ndev);
enetc_mdio_remove(pf);
@@ -995,4 +978,3 @@ module_pci_driver(enetc_pf_driver);
MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index ebd21bf4cfa1..28a786b2f3e7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -4,12 +4,6 @@
#include <linux/module.h>
#include "enetc.h"
-#define ENETC_DRV_VER_MAJ 1
-#define ENETC_DRV_VER_MIN 0
-
-#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
- __stringify(ENETC_DRV_VER_MIN)
-static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
#define ENETC_DRV_NAME_STR "ENETC VF driver"
static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
@@ -201,9 +195,6 @@ static int enetc_vf_probe(struct pci_dev *pdev,
netif_carrier_off(ndev);
- netif_info(priv, probe, ndev, "%s v%s\n",
- enetc_drv_name, enetc_drv_ver);
-
return 0;
err_reg_netdev:
@@ -225,8 +216,6 @@ static void enetc_vf_remove(struct pci_dev *pdev)
struct enetc_ndev_priv *priv;
priv = netdev_priv(si->ndev);
- netif_info(priv, drv, si->ndev, "%s v%s remove\n",
- enetc_drv_name, enetc_drv_ver);
unregister_netdev(si->ndev);
enetc_free_msix(priv);
@@ -254,4 +243,3 @@ module_pci_driver(enetc_vf_driver);
MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 12edd4e358f8..c1c267b61647 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2128,7 +2128,6 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
strlcpy(info->driver, fep->pdev->dev.driver->name,
sizeof(info->driver));
- strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
@@ -2529,15 +2528,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
return -EINVAL;
}
- cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
if (cycle > 0xFFFF) {
dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
- cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
if (cycle > 0xFFFF) {
- dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
+ dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
@@ -2642,6 +2641,8 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops fec_enet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = fec_enet_get_drvinfo,
.get_regs_len = fec_enet_get_regs_len,
.get_regs = fec_enet_get_regs,
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 0139cb9042ec..34150182cc35 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -8,3 +8,31 @@ config FSL_FMAN
help
Freescale Data-Path Acceleration Architecture Frame Manager
(FMan) support
+
+config DPAA_ERRATUM_A050385
+ bool
+ depends on ARM64 && FSL_DPAA
+ default y
+ help
+ DPAA FMan erratum A050385 software workaround implementation:
+ align buffers, data start, and SG fragment length to avoid FMan
+ DMA splits.
+ FMAN DMA reads or writes under heavy traffic load may cause an
+ FMAN internal resource leak, stopping further packet processing.
+ The FMAN internal queue can overflow when the FMAN splits a single
+ read or write transaction into multiple smaller transactions such
+ that more than 17 AXI transactions are in flight from the FMAN to
+ the interconnect. When the FMAN internal queue overflows, it can
+ stall further packet processing. The issue can occur with any
+ one of the following three conditions:
+ 1. An FMAN AXI transaction crosses a 4K address boundary (Erratum
+ A010022)
+ 2. The FMAN DMA address for an AXI transaction is not 16-byte
+ aligned, i.e. the last 4 bits of the address are non-zero
+ 3. Scatter Gather (SG) frames have more than one SG buffer in
+ the SG list and any buffer except the last one in the SG list
+ has a data size that is not a multiple of 16 bytes, i.e.,
+ other than 16, 32, 48, 64, etc.
+ With any one of the above three conditions present, there is a
+ likelihood of stalled FMAN packet processing, especially under
+ stress with multiple ports injecting line-rate traffic.
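Condition 1 above is a pure address-range property: a DMA transaction crosses a 4K boundary exactly when its first and last bytes fall in different 4 KiB pages. As a numeric illustration (not driver code):

	static bool crosses_4k_boundary(dma_addr_t addr, size_t len)
	{
		/* compare the 4 KiB page numbers of the first and last byte */
		return (addr >> 12) != ((addr + len - 1) >> 12);
	}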
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 934111def0be..f151d6e111dd 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1,5 +1,6 @@
/*
* Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -566,6 +567,10 @@ struct fman_cfg {
u32 qmi_def_tnums_thresh;
};
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+static bool fman_has_err_a050385;
+#endif
+
static irqreturn_t fman_exceptions(struct fman *fman,
enum fman_exceptions exception)
{
@@ -2518,6 +2523,14 @@ struct fman *fman_bind(struct device *fm_dev)
}
EXPORT_SYMBOL(fman_bind);
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void)
+{
+ return fman_has_err_a050385;
+}
+EXPORT_SYMBOL(fman_has_errata_a050385);
+#endif
+
static irqreturn_t fman_err_irq(int irq, void *handle)
{
struct fman *fman = (struct fman *)handle;
@@ -2845,6 +2858,11 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
goto fman_free;
}
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ fman_has_err_a050385 =
+ of_property_read_bool(fm_node, "fsl,erratum-a050385");
+#endif
+
return fman;
fman_node_put:
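
fman_has_errata_a050385() simply exposes the cached "fsl,erratum-a050385" device-tree flag, so DPAA Ethernet code can gate the buffer-realignment workaround at runtime. An illustrative call pattern, where dpaa_a050385_wa() is a hypothetical hook, not taken from this patch:

/* Illustrative caller: apply the realignment workaround only on
 * affected hardware.
 */
static int dpaa_tx_prep(struct sk_buff **skb)
{
#ifdef CONFIG_DPAA_ERRATUM_A050385
	if (fman_has_errata_a050385())
		return dpaa_a050385_wa(skb);	/* realign data and SG list */
#endif
	return 0;				/* unaffected hardware */
}
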
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index 935c317fa696..f2ede1360f03 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -1,5 +1,6 @@
/*
* Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -398,6 +399,10 @@ u16 fman_get_max_frm(void);
int fman_get_rx_extra_headroom(void);
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void);
+#endif
+
struct fman *fman_bind(struct device *dev);
#endif /* __FM_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 1ca543ac8f2c..004c266802a8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -366,13 +366,26 @@ static void set_dflts(struct dtsec_cfg *cfg)
cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
}
+static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
+{
+ u32 tmp;
+
+ tmp = (u32)((adr[5] << 24) |
+ (adr[4] << 16) | (adr[3] << 8) | adr[2]);
+ iowrite32be(tmp, &regs->macstnaddr1);
+
+ tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
+ iowrite32be(tmp, &regs->macstnaddr2);
+}
+
static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
- phy_interface_t iface, u16 iface_speed, u8 *macaddr,
+ phy_interface_t iface, u16 iface_speed, u64 addr,
u32 exception_mask, u8 tbi_addr)
{
bool is_rgmii, is_sgmii, is_qsgmii;
- int i;
+ enet_addr_t eth_addr;
u32 tmp;
+ int i;
/* Soft reset */
iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
@@ -501,12 +514,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
iowrite32be(0xffffffff, &regs->ievent);
- tmp = (u32)((macaddr[5] << 24) |
- (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
- iowrite32be(tmp, &regs->macstnaddr1);
-
- tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
- iowrite32be(tmp, &regs->macstnaddr2);
+ if (addr) {
+ MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr);
+ set_mac_address(regs, (u8 *)eth_addr);
+ }
/* HASH */
for (i = 0; i < NUM_OF_HASH_REGS; i++) {
@@ -519,18 +530,6 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
return 0;
}
-static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
-{
- u32 tmp;
-
- tmp = (u32)((adr[5] << 24) |
- (adr[4] << 16) | (adr[3] << 8) | adr[2]);
- iowrite32be(tmp, &regs->macstnaddr1);
-
- tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
- iowrite32be(tmp, &regs->macstnaddr2);
-}
-
static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
bool enable)
{
@@ -556,10 +555,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
pr_err("1G MAC driver supports 1G or lower speeds\n");
return -EINVAL;
}
- if (dtsec->addr == 0) {
- pr_err("Ethernet MAC Must have a valid MAC Address\n");
- return -EINVAL;
- }
if ((dtsec->dtsec_drv_param)->rx_prepend >
MAX_PACKET_ALIGNMENT) {
pr_err("packetAlignmentPadding can't be > than %d\n",
@@ -1391,9 +1386,8 @@ int dtsec_init(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct dtsec_cfg *dtsec_drv_param;
- int err;
u16 max_frm_ln;
- enet_addr_t eth_addr;
+ int err;
if (is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
@@ -1410,10 +1404,8 @@ int dtsec_init(struct fman_mac *dtsec)
dtsec_drv_param = dtsec->dtsec_drv_param;
- MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
-
err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
- dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
+ dtsec->max_speed, dtsec->addr, dtsec->exceptions,
dtsec->tbiphy->mdio.addr);
if (err) {
free_init_resources(dtsec);
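
set_mac_address(), now defined ahead of its new caller in init(), packs the six address bytes into the two big-endian station-address registers, with adr[5] ending up in the top byte of MACSTNADDR1. The packing in isolation, as a sketch that returns the register values instead of writing them via iowrite32be():

/* Sketch of the MACSTNADDR1/2 packing performed by set_mac_address(). */
static void pack_station_addr(const u8 *adr, u32 *stnaddr1, u32 *stnaddr2)
{
	*stnaddr1 = (u32)((adr[5] << 24) | (adr[4] << 16) |
			  (adr[3] << 8) | adr[2]);
	*stnaddr2 = (u32)((adr[1] << 24) | (adr[0] << 16));
}
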
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index e1901874c19f..f2b2bfcbb529 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -596,10 +596,6 @@ static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
static int check_init_parameters(struct fman_mac *memac)
{
- if (memac->addr == 0) {
- pr_err("Ethernet MAC must have a valid MAC address\n");
- return -EINVAL;
- }
if (!memac->exception_cb) {
pr_err("Uninitialized exception handler\n");
return -EINVAL;
@@ -1057,8 +1053,10 @@ int memac_init(struct fman_mac *memac)
}
/* MAC Address */
- MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
- add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+ if (memac->addr != 0) {
+ MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
+ add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+ }
fixed_link = memac_drv_param->fixed_link;
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index f75b9c11b2d2..8c7eb878d5b4 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -273,10 +273,6 @@ static int check_init_parameters(struct fman_mac *tgec)
pr_err("10G MAC driver only support 10G speed\n");
return -EINVAL;
}
- if (tgec->addr == 0) {
- pr_err("Ethernet 10G MAC Must have valid MAC Address\n");
- return -EINVAL;
- }
if (!tgec->exception_cb) {
pr_err("uninitialized exception_cb\n");
return -EINVAL;
@@ -706,8 +702,10 @@ int tgec_init(struct fman_mac *tgec)
cfg = tgec->cfg;
- MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
- set_mac_address(tgec->regs, (u8 *)eth_addr);
+ if (tgec->addr) {
+ MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
+ set_mac_address(tgec->regs, (u8 *)eth_addr);
+ }
/* interrupts */
/* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 55f2122c3217..43427c5b9396 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -724,12 +724,10 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the MAC address */
mac_addr = of_get_mac_address(mac_node);
- if (IS_ERR(mac_addr)) {
- dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
- }
- ether_addr_copy(mac_dev->addr, mac_addr);
+ if (IS_ERR(mac_addr))
+ dev_warn(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
+ else
+ ether_addr_copy(mac_dev->addr, mac_addr);
/* Get the port handles */
nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
@@ -855,7 +853,8 @@ static int mac_probe(struct platform_device *_of_dev)
if (err < 0)
dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
- dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
+ if (!IS_ERR(mac_addr))
+ dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
if (IS_ERR(priv->eth_dev)) {
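
With the addr == 0 validation dropped from the dTSEC/mEMAC/tGEC check_init_parameters() hunks above, a missing device-tree MAC address is no longer fatal: probe warns, mac_dev->addr stays zeroed, and the init paths skip programming an absent address. An illustrative guard for downstream consumers, assuming the standard is_zero_ether_addr() helper; this exact check is not part of the patch:

/* Illustrative guard: an all-zero address now means "not yet
 * assigned" and must not be programmed into the MAC.
 */
if (!is_zero_ether_addr(mac_dev->addr))
	set_mac_address(regs, mac_dev->addr);
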
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index add61fed33ee..ce85feaac357 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -53,7 +53,6 @@
MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
@@ -790,7 +789,6 @@ static void fs_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int fs_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 195fae6aec4a..5ff2634bee2f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -190,8 +190,6 @@ void fs_cleanup_bds(struct net_device *dev);
#define DRV_MODULE_NAME "fs_enet"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.1"
-#define DRV_MODULE_RELDATE "Sep 22, 2014"
/***************************************************************************/
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index f7e5cafe89a9..b3c69e9038ea 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -103,8 +103,6 @@
#define TX_TIMEOUT (5*HZ)
-const char gfar_driver_version[] = "2.0";
-
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 432c6a818ae5..8ced783f5302 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -68,7 +68,6 @@ struct ethtool_rx_list {
#define RXBUF_ALIGNMENT 64
#define DRV_NAME "gfar-enet"
-extern const char gfar_driver_version[];
/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
#define MAX_TX_QS 0x8
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3c8e4e2efc07..cc7d4f93da54 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -164,10 +164,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, gfar_driver_version,
- sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
}
/* Return the length of the register structure */
@@ -276,35 +272,6 @@ static int gfar_gcoalesce(struct net_device *dev,
cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
cvals->tx_max_coalesced_frames = txcount;
- cvals->use_adaptive_rx_coalesce = 0;
- cvals->use_adaptive_tx_coalesce = 0;
-
- cvals->pkt_rate_low = 0;
- cvals->rx_coalesce_usecs_low = 0;
- cvals->rx_max_coalesced_frames_low = 0;
- cvals->tx_coalesce_usecs_low = 0;
- cvals->tx_max_coalesced_frames_low = 0;
-
- /* When the packet rate is below pkt_rate_high but above
- * pkt_rate_low (both measured in packets per second) the
- * normal {rx,tx}_* coalescing parameters are used.
- */
-
- /* When the packet rate is (measured in packets per second)
- * is above pkt_rate_high, the {rx,tx}_*_high parameters are
- * used.
- */
- cvals->pkt_rate_high = 0;
- cvals->rx_coalesce_usecs_high = 0;
- cvals->rx_max_coalesced_frames_high = 0;
- cvals->tx_coalesce_usecs_high = 0;
- cvals->tx_max_coalesced_frames_high = 0;
-
- /* How often to do adaptive coalescing packet rate sampling,
- * measured in seconds. Must not be zero.
- */
- cvals->rate_sample_interval = 0;
-
return 0;
}
@@ -1507,6 +1474,8 @@ static int gfar_get_ts_info(struct net_device *dev,
}
const struct ethtool_ops gfar_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = gfar_gdrvinfo,
.get_regs_len = gfar_reglen,
.get_regs = gfar_get_regs,
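
The .supported_coalesce_params masks added throughout this series move parameter policing into the ethtool core: any ethtool_coalesce field outside the mask is rejected with -EOPNOTSUPP before the driver's handler runs, which is why gfar_gcoalesce() no longer zeroes every unused field and why hip04 and ice below can drop their hand-rolled checks. The declaration pattern, condensed (ops names as in this driver, list trimmed):

/* The core rejects any coalesce field outside this mask on the
 * driver's behalf, so only usecs and max-frames reach the setter.
 */
static const struct ethtool_ops gfar_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	/* ... remaining ops unchanged ... */
};
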
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 0d101c00286f..6e5f6dd169b5 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3990,5 +3990,4 @@ module_exit(ucc_geth_exit);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index a86a42131fc7..3fe903972195 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -26,7 +26,6 @@
#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
#define DRV_NAME "ucc_geth"
-#define DRV_VERSION "1.1"
#define NUM_TX_QUEUES 8
#define NUM_RX_QUEUES 8
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index dfebacf443fc..14c08a868190 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -334,8 +334,6 @@ uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index d9718b87279d..12f6c2442a7a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -811,20 +811,6 @@ static int hip04_set_coalesce(struct net_device *netdev,
{
struct hip04_priv *priv = netdev_priv(netdev);
- /* Check not supported parameters */
- if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
- (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
- (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
- (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
- (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
- (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
- (ec->tx_max_coalesced_frames_irq) ||
- (ec->stats_block_coalesce_usecs) ||
- (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
- return -EOPNOTSUPP;
-
if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
(ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
@@ -845,6 +831,8 @@ static void hip04_get_drvinfo(struct net_device *netdev,
}
static const struct ethtool_ops hip04_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES,
.get_coalesce = hip04_get_coalesce,
.set_coalesce = hip04_set_coalesce,
.get_drvinfo = hip04_get_drvinfo,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 717fccc2efba..49624acf2473 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1264,6 +1264,11 @@ static int hns_get_rxnfc(struct net_device *netdev,
}
static const struct ethtool_ops hns_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USECS_LOW_HIGH |
+ ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH,
.get_drvinfo = hns_nic_get_drvinfo,
.get_link = hns_nic_get_link,
.get_ringparam = hns_get_ringparam,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 1b0313900f98..d87158acdf6f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
+ HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is uninitializing */
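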
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index acb796cc10d0..8e04d3909321 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1711,7 +1711,7 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
- kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
+ kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
}
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -2228,7 +2228,7 @@ static void hns3_reset_prepare(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "hns3 flr prepare\n");
+ dev_info(&pdev->dev, "FLR prepare\n");
if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
ae_dev->ops->flr_prepare(ae_dev);
}
@@ -2237,7 +2237,7 @@ static void hns3_reset_done(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "hns3 flr done\n");
+ dev_info(&pdev->dev, "FLR done\n");
if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
ae_dev->ops->flr_done(ae_dev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 3f59a1924390..28b81f24afa1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1390,7 +1390,13 @@ static int hns3_set_fecparam(struct net_device *netdev,
return ops->set_fec(handle, fec_mode);
}
+#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
+ ETHTOOL_COALESCE_USE_ADAPTIVE | \
+ ETHTOOL_COALESCE_RX_USECS_HIGH | \
+ ETHTOOL_COALESCE_TX_USECS_HIGH)
+
static const struct ethtool_ops hns3vf_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
@@ -1416,6 +1422,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
};
static const struct ethtool_ops hns3_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 6295cf93c350..17228288d4df 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -87,7 +87,7 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
entries_per_desc = ARRAY_SIZE(desc[0].data);
index = offset % entries_per_desc;
- return (int)desc[offset / entries_per_desc].data[index];
+ return le32_to_cpu(desc[offset / entries_per_desc].data[index]);
}
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
@@ -145,10 +145,8 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
+ if (!desc_src)
return;
- }
desc = desc_src;
ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
@@ -179,6 +177,7 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_dbg_bitmap_cmd *bitmap;
+ enum hclge_opcode_type cmd;
int rq_id, pri_id, qset_id;
int port_id, nq_id, pg_id;
struct hclge_desc desc[2];
@@ -193,10 +192,10 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
return;
}
- ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1,
- HCLGE_OPC_QSET_DFX_STS);
+ cmd = HCLGE_OPC_QSET_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
@@ -204,48 +203,53 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);
- ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS);
+ cmd = HCLGE_OPC_PRI_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);
- ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS);
+ cmd = HCLGE_OPC_PG_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
- HCLGE_OPC_PORT_DFX_STS);
+ cmd = HCLGE_OPC_PORT_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);
- ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT);
+ cmd = HCLGE_OPC_SCH_NQ_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
+ cmd = HCLGE_OPC_SCH_RQ_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
+ cmd = HCLGE_OPC_TM_INTERNAL_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
@@ -257,18 +261,18 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
- HCLGE_OPC_TM_INTERNAL_CNT);
+ cmd = HCLGE_OPC_TM_INTERNAL_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
- HCLGE_OPC_TM_INTERNAL_STS_1);
+ cmd = HCLGE_OPC_TM_INTERNAL_STS_1;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
@@ -277,6 +281,12 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
le32_to_cpu(desc[0].data[4]));
dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
le32_to_cpu(desc[0].data[5]));
+ return;
+
+err_dcb_cmd_send:
+ dev_err(&hdev->pdev->dev,
+ "failed to dump dcb dfx, cmd = %#x, ret = %d\n",
+ cmd, ret);
}
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
@@ -583,7 +593,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_map_cmd_send;
- qset_id = nq_to_qs_map->qset_id & 0x3FF;
+ qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF;
cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
@@ -623,7 +633,8 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
if (ret)
goto err_tm_map_cmd_send;
- qset_maping[group_id] = bp_to_qs_map_cmd->qs_bit_map;
+ qset_maping[group_id] =
+ le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
}
dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n");
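
The le16_to_cpu()/le32_to_cpu() conversions added in this file matter because command descriptor payloads are little-endian on the wire; masking or shifting before converting returns byte-swapped garbage on big-endian hosts. The corrected order of operations, as a short sketch using the structures from the hunks above:

/* Convert first, then mask: the descriptor fields are __le16/__le32. */
u16 qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF;
u32 qs_map  = le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
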
@@ -826,6 +837,7 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
struct hclge_mac_ethertype_idx_rd_cmd *req0;
char printf_buf[HCLGE_DBG_BUF_LEN];
struct hclge_desc desc;
+ u32 msg_egress_port;
int ret, i;
dev_info(&hdev->pdev->dev, "mng tab:\n");
@@ -867,20 +879,21 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
HCLGE_DBG_BUF_LEN - strlen(printf_buf),
"%x |%04x |%x |%04x|%x |%02x |%02x |",
!!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
- req0->ethter_type,
+ le16_to_cpu(req0->ethter_type),
!!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
- req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG,
+ le16_to_cpu(req0->vlan_tag) & HCLGE_DBG_MNG_VLAN_TAG,
!!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
req0->i_port_bitmap, req0->i_port_direction);
+ msg_egress_port = le16_to_cpu(req0->egress_port);
snprintf(printf_buf + strlen(printf_buf),
HCLGE_DBG_BUF_LEN - strlen(printf_buf),
- "%d |%d |%02d |%04d|%x\n",
- !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B),
- req0->egress_port & HCLGE_DBG_MNG_PF_ID,
- (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
- req0->egress_queue,
- !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B));
+ "%x |%x |%02x |%04x|%x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
dev_info(&hdev->pdev->dev, "%s", printf_buf);
}
@@ -1067,11 +1080,8 @@ static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- dev_err(&hdev->pdev->dev,
- "allocate desc for get_m7_stats failed\n");
+ if (!desc_src)
return;
- }
desc_tmp = desc_src;
ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
@@ -1134,7 +1144,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
const char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
-#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
+#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
@@ -1158,8 +1168,8 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
while (length > 0) {
data0 = offset;
- if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
- data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
+ if (length >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
+ data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
else
data0 |= length << 16;
ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index c85b72dc44d2..50d5ef71756b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1667,9 +1667,6 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
hclge_handle_rocee_ras_error(ae_dev);
}
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- goto out;
-
if (ae_dev->hw_err_reset_req)
return PCI_ERS_RESULT_NEED_RESET;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 89d352385260..75d0d0fcd69b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2449,10 +2449,12 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
+ struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
- if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
+ if (!mac->support_autoneg && mac->speed == speed &&
+ mac->duplex == duplex)
return 0;
ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
@@ -3442,7 +3444,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
u32 val;
if (hclge_get_hw_reset_stat(handle)) {
- dev_info(&pdev->dev, "Hardware reset not finish\n");
+ dev_info(&pdev->dev, "hardware reset not finish\n");
dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
@@ -3451,20 +3453,20 @@ static void hclge_do_reset(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
+ dev_info(&pdev->dev, "global reset requested\n");
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
- dev_info(&pdev->dev, "Global Reset requested\n");
break;
case HNAE3_FUNC_RESET:
- dev_info(&pdev->dev, "PF Reset requested\n");
+ dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&pdev->dev,
- "Unsupported reset type: %d\n", hdev->reset_type);
+ "unsupported reset type: %d\n", hdev->reset_type);
break;
}
}
@@ -7354,7 +7356,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return -EINVAL;
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (status) {
@@ -7399,7 +7400,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -7619,11 +7619,17 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
}
ether_addr_copy(vport->vf_info.mac, mac_addr);
- dev_info(&hdev->pdev->dev,
- "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
- vf, mac_addr);
- return hclge_inform_reset_assert_to_vf(vport);
+ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
+ vf, mac_addr);
+ return hclge_inform_reset_assert_to_vf(vport);
+ }
+
+ dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
+ vf, mac_addr);
+ return 0;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
@@ -7746,16 +7752,27 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
struct hclge_desc desc;
int ret;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
-
+ /* read current vlan filter parameter */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req->vlan_type = vlan_type;
- req->vlan_fe = filter_en ? fe_type : 0;
req->vf_id = vf_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vlan filter config, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* modify and write new config parameter */
+ hclge_cmd_reuse_desc(&desc, false);
+ req->vlan_fe = filter_en ?
+ (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
+ dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
ret);
return ret;
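
The rewritten hclge_set_vlan_filter_ctrl() stops clobbering unrelated enable bits: it reads the current vlan_fe, flips only the requested fe_type bits, and writes the result back through the reused descriptor. A commented skeleton of that read-modify-write, with calls as in the hunk above and error prints trimmed:

/* 1. read the current filter configuration */
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
	return ret;

/* 2. re-arm the same descriptor as a write, touching only fe_type */
hclge_cmd_reuse_desc(&desc, false);
req->vlan_fe = filter_en ? (req->vlan_fe | fe_type)
			 : (req->vlan_fe & ~fe_type);

/* 3. write the merged configuration back */
return hclge_cmd_send(&hdev->hw, &desc, 1);
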
@@ -8273,6 +8290,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
kfree(vlan);
}
}
+ clear_bit(vport->vport_id, hdev->vf_vlan_full);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
@@ -8489,6 +8507,28 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
}
}
+static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
+{
+ struct hclge_vlan_info *vlan_info;
+ struct hclge_vport *vport;
+ int ret;
+ int vf;
+
+ /* clear port base vlan for all vf */
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+ vport = &hdev->vport[vf];
+ vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vf vlan for vf%d, ret = %d\n",
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
+ }
+}
+
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill)
{
@@ -9898,6 +9938,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_mac *mac = &hdev->hw.mac;
hclge_reset_vf_rate(hdev);
+ hclge_clear_vf_vlan(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
@@ -10252,8 +10293,9 @@ static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
- int data_len_per_desc, data_len, bd_num, i;
+ int data_len_per_desc, bd_num, i;
int bd_num_list[BD_LIST_MAX_NUM];
+ u32 data_len;
int ret;
ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index a3c0822191a9..3d850f6b1e37 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -799,6 +799,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_get_link_mode(vport, req);
break;
case HCLGE_MBX_GET_VF_FLR_STATUS:
+ case HCLGE_MBX_VF_UNINIT:
hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_UC);
hclge_rm_vport_all_mac_table(vport, true,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index d6597206e692..bd4bbcdde7d1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2002,7 +2002,10 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
return HCLGEVF_VECTOR0_EVENT_MBX;
}
- dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
+ /* print other vector0 event source */
+ dev_info(&hdev->pdev->dev,
+ "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
+ cmdq_stat_reg);
return HCLGEVF_VECTOR0_EVENT_OTHER;
}
@@ -2803,6 +2806,9 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
hclgevf_state_uninit(hdev);
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_VF_UNINIT, 0, NULL, 0,
+ false, NULL, 0);
+
if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
hclgevf_misc_irq_uninit(hdev);
hclgevf_uninit_msi(hdev);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index b7fc17756c51..06248a7db7f2 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -872,7 +872,7 @@ static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
{
struct emac_regs __iomem *p = dev->emacp;
u32 r = 0;
- int n, err = -ETIMEDOUT;
+ int n;
mutex_lock(&dev->mdio_lock);
@@ -919,7 +919,6 @@ static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
goto bail;
}
}
- err = 0;
bail:
if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 84121aab7ff1..96d36ae5049e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -712,29 +712,36 @@ static int ibmveth_close(struct net_device *netdev)
return 0;
}
-static int netdev_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
+static int ibmveth_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
- u32 supported, advertising;
-
- supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
- SUPPORTED_FIBRE);
- advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- cmd->base.speed = SPEED_1000;
- cmd->base.duplex = DUPLEX_FULL;
- cmd->base.port = PORT_FIBRE;
- cmd->base.phy_address = 0;
- cmd->base.autoneg = AUTONEG_ENABLE;
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ return ethtool_virtdev_set_link_ksettings(dev, cmd,
+ &adapter->speed,
+ &adapter->duplex);
+}
+
+static int ibmveth_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ cmd->base.speed = adapter->speed;
+ cmd->base.duplex = adapter->duplex;
+ cmd->base.port = PORT_OTHER;
return 0;
}
+static void ibmveth_init_link_settings(struct net_device *dev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ adapter->speed = SPEED_1000;
+ adapter->duplex = DUPLEX_FULL;
+}
+
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -965,12 +972,13 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_strings = ibmveth_get_strings,
- .get_sset_count = ibmveth_get_sset_count,
- .get_ethtool_stats = ibmveth_get_ethtool_stats,
- .get_link_ksettings = netdev_get_link_ksettings,
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ibmveth_get_strings,
+ .get_sset_count = ibmveth_get_sset_count,
+ .get_ethtool_stats = ibmveth_get_ethtool_stats,
+ .get_link_ksettings = ibmveth_get_link_ksettings,
+ .set_link_ksettings = ibmveth_set_link_ksettings,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -978,8 +986,6 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
-
static int ibmveth_send(struct ibmveth_adapter *adapter,
union ibmveth_buf_desc *descs, unsigned long mss)
{
@@ -1674,6 +1680,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->netdev = netdev;
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
adapter->pool_config = 0;
+ ibmveth_init_link_settings(netdev);
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
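
ethtool_virtdev_set_link_ksettings() is the core helper for virtual NICs: it validates the requested speed/duplex and stores them in driver-owned fields, which ibmveth_get_link_ksettings() then simply echoes back. A simplified view of the helper's job, assuming the adapter->speed/duplex fields added in the header hunk below; this is not the exact core implementation:

/* Simplified: validate, then store so get_link_ksettings() can
 * echo the values back to userspace.
 */
if (!ethtool_validate_speed(cmd->base.speed) ||
    !ethtool_validate_duplex(cmd->base.duplex))
	return -EINVAL;
adapter->speed = cmd->base.speed;
adapter->duplex = cmd->base.duplex;
return 0;
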
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 4e9bf3421f4f..27dfff200166 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -162,6 +162,9 @@ struct ibmveth_adapter {
u64 tx_send_failed;
u64 tx_large_packets;
u64 rx_large_packets;
+ /* Ethtool settings */
+ u8 duplex;
+ u32 speed;
};
/*
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c75239d8820f..4bd33245bad6 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2142,6 +2142,8 @@ static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
+ bool saved_state = false;
+ unsigned long flags;
u32 reset_state;
int rc = 0;
@@ -2153,17 +2155,25 @@ static void __ibmvnic_reset(struct work_struct *work)
return;
}
- reset_state = adapter->state;
-
rwi = get_next_rwi(adapter);
while (rwi) {
+ spin_lock_irqsave(&adapter->state_lock, flags);
+
if (adapter->state == VNIC_REMOVING ||
adapter->state == VNIC_REMOVED) {
+ spin_unlock_irqrestore(&adapter->state_lock, flags);
kfree(rwi);
rc = EBUSY;
break;
}
+ if (!saved_state) {
+ reset_state = adapter->state;
+ adapter->state = VNIC_RESETTING;
+ saved_state = true;
+ }
+ spin_unlock_irqrestore(&adapter->state_lock, flags);
+
if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
/* CHANGE_PARAM requestor holds rtnl_lock */
rc = do_change_param_reset(adapter, rwi, reset_state);
@@ -5091,6 +5101,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
__ibmvnic_delayed_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
+ spin_lock_init(&adapter->state_lock);
mutex_init(&adapter->fw_lock);
init_completion(&adapter->init_done);
init_completion(&adapter->fw_done);
@@ -5163,8 +5174,17 @@ static int ibmvnic_remove(struct vio_dev *dev)
{
struct net_device *netdev = dev_get_drvdata(&dev->dev);
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->state_lock, flags);
+ if (adapter->state == VNIC_RESETTING) {
+ spin_unlock_irqrestore(&adapter->state_lock, flags);
+ return -EBUSY;
+ }
adapter->state = VNIC_REMOVING;
+ spin_unlock_irqrestore(&adapter->state_lock, flags);
+
rtnl_lock();
unregister_netdevice(netdev);
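
The new state_lock plus VNIC_RESETTING state close a race between the reset worker and ibmvnic_remove(): the worker publishes the resetting state under the lock before touching the adapter, and remove returns -EBUSY while that state is visible, so the adapter cannot be torn down mid-reset. The two sides distilled, with names from the hunks above and error handling trimmed:

/* Reset worker: record the pre-reset state once, publish RESETTING. */
spin_lock_irqsave(&adapter->state_lock, flags);
if (!saved_state) {
	reset_state = adapter->state;
	adapter->state = VNIC_RESETTING;
	saved_state = true;
}
spin_unlock_irqrestore(&adapter->state_lock, flags);

/* Remove path: refuse teardown while a reset is in flight. */
spin_lock_irqsave(&adapter->state_lock, flags);
if (adapter->state == VNIC_RESETTING) {
	spin_unlock_irqrestore(&adapter->state_lock, flags);
	return -EBUSY;
}
adapter->state = VNIC_REMOVING;
spin_unlock_irqrestore(&adapter->state_lock, flags);
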
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 60eccaf91b12..f8416e1d4cf0 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -941,7 +941,8 @@ enum vnic_state {VNIC_PROBING = 1,
VNIC_CLOSING,
VNIC_CLOSED,
VNIC_REMOVING,
- VNIC_REMOVED};
+ VNIC_REMOVED,
+ VNIC_RESETTING};
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
VNIC_RESET_MOBILITY,
@@ -1090,4 +1091,7 @@ struct ibmvnic_adapter {
struct ibmvnic_tunables desired;
struct ibmvnic_tunables fallback;
+
+ /* Used for serialization of the state field */
+ spinlock_t state_lock;
};
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index be56e631d693..6f45df5690d4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1852,6 +1852,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops e1000_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 9e7881db7859..1d47e2503072 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -2307,6 +2307,7 @@ static int e1000e_get_ts_info(struct net_device *netdev,
}
static const struct ethtool_ops e1000_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 68edf55ac906..37fbc646deb9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1151,6 +1151,8 @@ static int fm10k_set_channels(struct net_device *dev,
}
static const struct ethtool_ops fm10k_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_strings = fm10k_get_strings,
.get_sset_count = fm10k_get_sset_count,
.get_ethtool_stats = fm10k_get_ethtool_stats,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 317f3f1458db..aa8026b1eb81 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5249,6 +5249,11 @@ static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
};
static const struct ethtool_ops i40e_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_RX_USECS_HIGH |
+ ETHTOOL_COALESCE_TX_USECS_HIGH,
.get_drvinfo = i40e_get_drvinfo,
.get_regs_len = i40e_get_regs_len,
.get_regs = i40e_get_regs,
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index bd1b1ed323f4..bcd11b4b29df 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -81,7 +81,7 @@ struct iavf_vsi {
#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
#define IAVF_TX_CTXTDESC(R, i) \
(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
-#define IAVF_MAX_REQ_QUEUES 4
+#define IAVF_MAX_REQ_QUEUES 16
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 84c3d8d97ef6..2c39d46b6138 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -860,7 +860,7 @@ static void iavf_get_channels(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
/* Report maximum channels */
- ch->max_combined = IAVF_MAX_REQ_QUEUES;
+ ch->max_combined = adapter->vsi_res->num_queue_pairs;
ch->max_other = NONQ_VECS;
ch->other_count = NONQ_VECS;
@@ -881,14 +881,7 @@ static int iavf_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- int num_req = ch->combined_count;
-
- if (num_req != adapter->num_active_queues &&
- !(adapter->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
- dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
- return -EINVAL;
- }
+ u32 num_req = ch->combined_count;
if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
adapter->num_tc) {
@@ -899,14 +892,19 @@ static int iavf_set_channels(struct net_device *netdev,
/* All of these should have already been checked by ethtool before this
* even gets to us, but just to be sure.
*/
- if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES)
+ if (num_req > adapter->vsi_res->num_queue_pairs)
return -EINVAL;
+ if (num_req == adapter->num_active_queues)
+ return 0;
+
if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
return -EINVAL;
adapter->num_req_queues = num_req;
- return iavf_request_queues(adapter, num_req);
+ adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+ iavf_schedule_reset(adapter);
+ return 0;
}
/**
@@ -998,6 +996,10 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static const struct ethtool_ops iavf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
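
Queue-count changes now go through the normal reset path instead of a direct VIRTCHNL_OP_REQUEST_QUEUES message (the iavf_request_queues() helper removed in the iavf_virtchnl.c hunk that follows); the reset renegotiates resources with the PF, which also makes the capability-flag check unnecessary. The new iavf_set_channels() flow, distilled:

/* Clamp to what the PF granted, no-op when unchanged, otherwise
 * record the request and let the scheduled reset renegotiate.
 */
if (num_req > adapter->vsi_res->num_queue_pairs)
	return -EINVAL;
if (num_req == adapter->num_active_queues)
	return 0;

adapter->num_req_queues = num_req;
adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_schedule_reset(adapter);
return 0;
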
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 76361bd468db..2050649848ba 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3450,7 +3450,7 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
if (num_req_queues &&
- num_req_queues != adapter->vsi_res->num_queue_pairs) {
+ num_req_queues > adapter->vsi_res->num_queue_pairs) {
/* Problem. The PF gave us fewer queues than what we had
* negotiated in our request. Need a reset to see if we can't
* get back to a working state.
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 1ab9cb339acb..d58374c2c33d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -397,33 +397,6 @@ void iavf_map_queues(struct iavf_adapter *adapter)
}
/**
- * iavf_request_queues
- * @adapter: adapter structure
- * @num: number of requested queues
- *
- * We get a default number of queues from the PF. This enables us to request a
- * different number. Returns 0 on success, negative on failure
- **/
-int iavf_request_queues(struct iavf_adapter *adapter, int num)
-{
- struct virtchnl_vf_res_request vfres;
-
- if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
- /* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
- adapter->current_op);
- return -EBUSY;
- }
-
- vfres.num_queue_pairs = min_t(int, num, num_online_cpus());
-
- adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
- adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
- return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
- (u8 *)&vfres, sizeof(vfres));
-}
-
-/**
* iavf_add_ether_addrs
* @adapter: adapter structure
*
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2d51ceaa2c8c..ce73a6a96aac 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -60,7 +60,6 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
-#define ICE_MBXRQ_LEN 512
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
@@ -70,7 +69,6 @@ extern const char ice_drv_ver[];
#define ICE_Q_WAIT_RETRY_LIMIT 10
#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS 256
-#define ICE_MAX_SMALL_RSS_QS 8
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
@@ -213,6 +211,7 @@ enum ice_state {
__ICE_SERVICE_DIS,
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
+ __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
__ICE_STATE_NBITS /* must be last */
};
@@ -363,8 +362,8 @@ struct ice_pf {
struct ice_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
u16 num_vfs_supported; /* num VFs supported for this PF */
- u16 num_vf_qps; /* num queue pairs per VF */
- u16 num_vf_msix; /* num vectors per VF */
+ u16 num_qps_per_vf;
+ u16 num_msix_per_vf;
/* used to ratelimit the MDD event logging */
unsigned long last_printed_mdd_jiffies;
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 1fe54f08f162..e574a70fcc99 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -620,8 +620,8 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
* @oem_ver: 8 bit NVM version
* @oem_build: 16 bit NVM build number
* @oem_patch: 8 bit NVM patch number
- * @ver_hi: high 16 bits of the NVM version
- * @ver_lo: low 16 bits of the NVM version
+ * @ver_hi: high 8 bits of the NVM version
+ * @ver_lo: low 8 bits of the NVM version
*/
void
ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 16656b6c3d09..7bea09363b42 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -77,9 +77,9 @@ static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
mode = DCB_CAP_DCBX_LLD_MANAGED;
if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
- return (mode | DCB_CAP_DCBX_VER_CEE);
+ return mode | DCB_CAP_DCBX_VER_CEE;
else
- return (mode | DCB_CAP_DCBX_VER_IEEE);
+ return mode | DCB_CAP_DCBX_VER_IEEE;
}
/**
@@ -779,7 +779,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
bool need_reconfig = false;
struct ice_port_info *pi;
struct ice_vsi *pf_vsi;
- u8 type;
+ u8 mib_type;
int ret;
/* Not DCB capable or capability disabled */
@@ -794,16 +794,16 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pi = pf->hw.port_info;
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
/* Ignore if event is not for Nearest Bridge */
- type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
- ICE_AQ_LLDP_BRID_TYPE_M);
- dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", type);
- if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
+ mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M);
+ dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);
+ if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
return;
/* Check MIB Type and return if event for Remote MIB update */
- type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
- dev_dbg(dev, "LLDP event mib type %s\n", type ? "remote" : "local");
- if (type == ICE_AQ_LLDP_MIB_REMOTE) {
+ mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+ dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local");
+ if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
@@ -832,10 +832,11 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
dev_dbg(dev, "No change detected in DCBX configuration.\n");
- pf->dcbx_cap = ice_dcb_get_mode(pi, false);
goto out;
}
+ pf->dcbx_cap = ice_dcb_get_mode(pi, false);
+
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
&pi->local_dcbx_cfg);
ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index ab37dddb225b..e3d148f12aac 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -173,8 +173,8 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct ice_hw *hw = &pf->hw;
u16 oem_build;
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
/* Display NVM version (from which the firmware version can be
* determined) which contains more pertinent information.
@@ -185,7 +185,7 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
"%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo,
hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
- strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
@@ -1132,6 +1132,33 @@ done:
}
/**
+ * ice_nway_reset - restart autonegotiation
+ * @netdev: network interface device structure
+ */
+static int ice_nway_reset(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_port_info *pi;
+ enum ice_status status;
+
+ pi = vsi->port_info;
+ /* If VSI state is up, then restart autoneg with link up */
+ if (!test_bit(__ICE_DOWN, vsi->back->state))
+ status = ice_aq_set_link_restart_an(pi, true, NULL);
+ else
+ status = ice_aq_set_link_restart_an(pi, false, NULL);
+
+ if (status) {
+ netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
+ status, pi->hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
* ice_get_priv_flags - report device private flags
* @netdev: network interface device structure
*
@@ -1264,6 +1291,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
status = ice_cfg_lldp_mib_change(&pf->hw, true);
if (status)
dev_dbg(dev, "Fail to enable MIB change events\n");
+
+ ice_nway_reset(netdev);
}
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
@@ -2775,30 +2804,6 @@ done:
return err;
}
-static int ice_nway_reset(struct net_device *netdev)
-{
- /* restart autonegotiation */
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_port_info *pi;
- enum ice_status status;
-
- pi = vsi->port_info;
- /* If VSI state is up, then restart autoneg with link up */
- if (!test_bit(__ICE_DOWN, vsi->back->state))
- status = ice_aq_set_link_restart_an(pi, true, NULL);
- else
- status = ice_aq_set_link_restart_an(pi, false, NULL);
-
- if (status) {
- netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
- status, pi->hw->adminq.sq_last_status);
- return -EIO;
- }
-
- return 0;
-}
-
/**
* ice_get_pauseparam - Get Flow Control status
* @netdev: network interface device structure
@@ -3452,12 +3457,6 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
case ICE_TX_CONTAINER:
- if (ec->tx_coalesce_usecs_high) {
- netdev_info(vsi->netdev, "setting %s-usecs-high is not supported\n",
- c_type_str);
- return -EINVAL;
- }
-
use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
coalesce_usecs = ec->tx_coalesce_usecs;
@@ -3534,53 +3533,6 @@ ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
}
/**
- * ice_is_coalesce_param_invalid - check for unsupported coalesce parameters
- * @netdev: pointer to the netdev associated with this query
- * @ec: ethtool structure to fill with driver's coalesce settings
- *
- * Print netdev info if driver doesn't support one of the parameters
- * and return error. When any parameters will be implemented, remove only
- * this parameter from param array.
- */
-static int
-ice_is_coalesce_param_invalid(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct ice_ethtool_not_used {
- u32 value;
- const char *name;
- } param[] = {
- {ec->stats_block_coalesce_usecs, "stats-block-usecs"},
- {ec->rate_sample_interval, "sample-interval"},
- {ec->pkt_rate_low, "pkt-rate-low"},
- {ec->pkt_rate_high, "pkt-rate-high"},
- {ec->rx_max_coalesced_frames, "rx-frames"},
- {ec->rx_coalesce_usecs_irq, "rx-usecs-irq"},
- {ec->rx_max_coalesced_frames_irq, "rx-frames-irq"},
- {ec->tx_max_coalesced_frames, "tx-frames"},
- {ec->tx_coalesce_usecs_irq, "tx-usecs-irq"},
- {ec->tx_max_coalesced_frames_irq, "tx-frames-irq"},
- {ec->rx_coalesce_usecs_low, "rx-usecs-low"},
- {ec->rx_max_coalesced_frames_low, "rx-frames-low"},
- {ec->tx_coalesce_usecs_low, "tx-usecs-low"},
- {ec->tx_max_coalesced_frames_low, "tx-frames-low"},
- {ec->rx_max_coalesced_frames_high, "rx-frames-high"},
- {ec->tx_max_coalesced_frames_high, "tx-frames-high"}
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(param); i++) {
- if (param[i].value) {
- netdev_info(netdev, "Setting %s not supported\n",
- param[i].name);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
* ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs
* @netdev: netdev used for print
* @itr_setting: previous user setting
@@ -3620,9 +3572,6 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (ice_is_coalesce_param_invalid(netdev, ec))
- return -EINVAL;
-
if (q_num < 0) {
struct ice_q_vector *q_vector = vsi->q_vectors[0];
int v_idx;
@@ -3817,6 +3766,9 @@ ice_get_module_eeprom(struct net_device *netdev,
}
static const struct ethtool_ops ice_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_RX_USECS_HIGH,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
@@ -3866,6 +3818,7 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
.get_regs = ice_get_regs,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
+ .get_link = ethtool_op_get_link,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_strings = ice_get_strings,
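The .supported_coalesce_params additions above let the ethtool core reject any coalesce field the driver does not declare, which is what allows per-driver checks such as the removed ice_is_coalesce_param_invalid() to go away. A minimal userspace model of that mask check (the bit values here are illustrative, not the kernel's ETHTOOL_COALESCE_* encoding):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define COALESCE_USECS          (1u << 0)
#define COALESCE_USE_ADAPTIVE   (1u << 1)
#define COALESCE_RX_USECS_HIGH  (1u << 2)
#define COALESCE_TX_FRAMES      (1u << 3)

/* a request that touches any field outside the driver's declared mask
 * is rejected centrally, before the driver callback runs */
static bool coalesce_params_ok(uint32_t supported, uint32_t requested)
{
	return (requested & ~supported) == 0;
}

int main(void)
{
	uint32_t ice_mask = COALESCE_USECS | COALESCE_USE_ADAPTIVE |
			    COALESCE_RX_USECS_HIGH;

	printf("%d\n", coalesce_params_ok(ice_mask, COALESCE_USECS));    /* 1 */
	printf("%d\n", coalesce_params_ok(ice_mask, COALESCE_TX_FRAMES));/* 0 */
	return 0;
}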
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index a05ceb59863b..3de862a3c789 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -694,7 +694,7 @@ out:
* ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
* @seg: packet segment the field being set belongs to
* @fld: field to be set
- * @type: type of the field
+ * @field_type: type of the field
* @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
* entry's input buffer
* @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
@@ -715,16 +715,16 @@ out:
*/
static void
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
- enum ice_flow_fld_match_type type, u16 val_loc,
+ enum ice_flow_fld_match_type field_type, u16 val_loc,
u16 mask_loc, u16 last_loc)
{
u64 bit = BIT_ULL(fld);
seg->match |= bit;
- if (type == ICE_FLOW_FLD_TYPE_RANGE)
+ if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
seg->range |= bit;
- seg->fields[fld].type = type;
+ seg->fields[fld].type = field_type;
seg->fields[fld].src.val = val_loc;
seg->fields[fld].src.mask = mask_loc;
seg->fields[fld].src.last = last_loc;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index ff72a6d1c978..2f256bf45efc 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -9,11 +9,11 @@
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
- * @type: VSI type enum
+ * @vsi_type: VSI type enum
*/
-const char *ice_vsi_type_str(enum ice_vsi_type type)
+const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
- switch (type) {
+ switch (vsi_type) {
case ICE_VSI_PF:
return "ICE_VSI_PF";
case ICE_VSI_VF:
@@ -178,12 +178,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vf = &pf->vf[vsi->vf_id];
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
- /* pf->num_vf_msix includes (VF miscellaneous vector +
+ /* pf->num_msix_per_vf includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is the number
* of queue vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
* original vector count
*/
- vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
+ vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
break;
case ICE_VSI_LB:
vsi->alloc_txq = 1;
@@ -350,13 +350,13 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
/**
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
- * @type: type of VSI
+ * @vsi_type: type of VSI
* @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -377,13 +377,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
if (!vsi)
goto unlock_pf;
- vsi->type = type;
+ vsi->type = vsi_type;
vsi->back = pf;
set_bit(__ICE_DOWN, vsi->state);
vsi->idx = pf->next_vsi;
- if (type == ICE_VSI_VF)
+ if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
@@ -571,12 +571,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
break;
case ICE_VSI_VF:
- /* VF VSI will gets a small RSS table
- * For VSI_LUT, LUT size should be set to 64 bytes
+ /* VF VSI will get a small RSS table.
+ * For VSI_LUT, LUT size should be set to 64 bytes.
*/
vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
- vsi->rss_size = min_t(int, num_online_cpus(),
- BIT(cap->rss_table_entry_width));
+ vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
break;
case ICE_VSI_LB:
@@ -684,7 +683,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
if (vsi->type == ICE_VSI_PF)
max_rss = ICE_MAX_LG_RSS_QS;
else
- max_rss = ICE_MAX_SMALL_RSS_QS;
+ max_rss = ICE_MAX_RSS_QS_PER_VF;
qcount_rx = min_t(int, rx_numq_tc, max_rss);
if (!vsi->req_rxq)
qcount_rx = min_t(int, qcount_rx,
@@ -908,6 +907,109 @@ out:
}
/**
+ * ice_free_res - free a block of resources
+ * @res: pointer to the resource
+ * @index: starting index previously returned by ice_get_res
+ * @id: identifier to track owner
+ *
+ * Returns number of resources freed
+ */
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
+{
+ int count = 0;
+ int i;
+
+ if (!res || index >= res->end)
+ return -EINVAL;
+
+ id |= ICE_RES_VALID_BIT;
+ for (i = index; i < res->end && res->list[i] == id; i++) {
+ res->list[i] = 0;
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * ice_search_res - Search the tracker for a block of resources
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ */
+static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int start = 0, end = 0;
+
+ if (needed > res->end)
+ return -ENOMEM;
+
+ id |= ICE_RES_VALID_BIT;
+
+ do {
+ /* skip already allocated entries */
+ if (res->list[end++] & ICE_RES_VALID_BIT) {
+ start = end;
+ if ((start + needed) > res->end)
+ break;
+ }
+
+ if (end == (start + needed)) {
+ int i = start;
+
+ /* there was enough, so assign it to the requestor */
+ while (i != end)
+ res->list[i++] = id;
+
+ return start;
+ }
+ } while (end < res->end);
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_get_free_res_count - Get free count from a resource tracker
+ * @res: Resource tracker instance
+ */
+static u16 ice_get_free_res_count(struct ice_res_tracker *res)
+{
+ u16 i, count = 0;
+
+ for (i = 0; i < res->end; i++)
+ if (!(res->list[i] & ICE_RES_VALID_BIT))
+ count++;
+
+ return count;
+}
+
+/**
+ * ice_get_res - get a block of resources
+ * @pf: board private structure
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or negative for error
+ */
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ if (!res || !pf)
+ return -EINVAL;
+
+ if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
+ dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries=%d, id=0x%04x\n",
+ needed, res->num_entries, id);
+ return -EINVAL;
+ }
+
+ return ice_search_res(res, needed, id);
+}
+
+/**
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
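ice_free_res(), ice_search_res() and ice_get_res() above are moved, not changed, so the new ice_get_free_res_count() can sit beside them. As a standalone illustration of the first-fit scan they implement, here is a userspace model of the tracker; the VALID bit marks an owned slot, and the sizes and ids are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define RES_VALID_BIT 0x8000	/* stand-in for ICE_RES_VALID_BIT */

struct tracker { uint16_t list[16]; uint16_t end; };

/* first-fit scan for a contiguous run of 'needed' free slots */
static int search_res(struct tracker *res, uint16_t needed, uint16_t id)
{
	int start = 0, end = 0;

	if (needed > res->end)
		return -1;		/* -ENOMEM in the driver */

	id |= RES_VALID_BIT;
	do {
		if (res->list[end++] & RES_VALID_BIT) {
			start = end;	/* run broken: restart after it */
			if (start + needed > res->end)
				break;
		}
		if (end == start + needed) {
			for (int i = start; i < end; i++)
				res->list[i] = id;	/* claim the run */
			return start;
		}
	} while (end < res->end);

	return -1;
}

int main(void)
{
	struct tracker t = { .end = 8 };

	t.list[2] = RES_VALID_BIT | 1;	/* slot 2 already owned by id 1 */
	printf("base = %d\n", search_res(&t, 3, 7));	/* prints base = 3 */
	return 0;
}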
@@ -939,8 +1041,9 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
if (vsi->base_vector < 0) {
- dev_err(dev, "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
- num_q_vectors, vsi->vsi_num, vsi->base_vector);
+ dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
+ ice_get_free_res_count(pf->irq_tracker),
+ ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
return -ENOENT;
}
pf->num_avail_sw_msix -= num_q_vectors;
@@ -1774,20 +1877,14 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
* ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
* @vsi: VSI to check whether or not VLAN pruning is enabled.
*
- * returns true if Rx VLAN pruning and Tx VLAN anti-spoof is enabled and false
- * otherwise.
+ * returns true if Rx VLAN pruning is enabled and false otherwise.
*/
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
{
- u8 rx_pruning = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- u8 tx_pruning = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
-
if (!vsi)
return false;
- return ((vsi->info.sw_flags2 & rx_pruning) &&
- (vsi->info.sec_flags & tx_pruning));
+ return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
}
/**
@@ -1987,7 +2084,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
- * @type: VSI type
+ * @vsi_type: VSI type
* @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
@@ -1999,7 +2096,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type type, u16 vf_id)
+ enum ice_vsi_type vsi_type, u16 vf_id)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
@@ -2007,10 +2104,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- if (type == ICE_VSI_VF)
- vsi = ice_vsi_alloc(pf, type, vf_id);
+ if (vsi_type == ICE_VSI_VF)
+ vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
else
- vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
@@ -2347,94 +2444,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
}
/**
- * ice_free_res - free a block of resources
- * @res: pointer to the resource
- * @index: starting index previously returned by ice_get_res
- * @id: identifier to track owner
- *
- * Returns number of resources freed
- */
-int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
-{
- int count = 0;
- int i;
-
- if (!res || index >= res->end)
- return -EINVAL;
-
- id |= ICE_RES_VALID_BIT;
- for (i = index; i < res->end && res->list[i] == id; i++) {
- res->list[i] = 0;
- count++;
- }
-
- return count;
-}
-
-/**
- * ice_search_res - Search the tracker for a block of resources
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or -ENOMEM for error
- */
-static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
-{
- int start = 0, end = 0;
-
- if (needed > res->end)
- return -ENOMEM;
-
- id |= ICE_RES_VALID_BIT;
-
- do {
- /* skip already allocated entries */
- if (res->list[end++] & ICE_RES_VALID_BIT) {
- start = end;
- if ((start + needed) > res->end)
- break;
- }
-
- if (end == (start + needed)) {
- int i = start;
-
- /* there was enough, so assign it to the requestor */
- while (i != end)
- res->list[i++] = id;
-
- return start;
- }
- } while (end < res->end);
-
- return -ENOMEM;
-}
-
-/**
- * ice_get_res - get a block of resources
- * @pf: board private structure
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or negative for error
- */
-int
-ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
-{
- if (!res || !pf)
- return -EINVAL;
-
- if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
- dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
- needed, res->num_entries, id);
- return -EINVAL;
- }
-
- return ice_search_res(res, needed, id);
-}
-
-/**
* ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
* @vsi: the VSI being un-configured
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 585f1350403f..04ca00799364 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -6,7 +6,7 @@
#include "ice.h"
-const char *ice_vsi_type_str(enum ice_vsi_type type);
+const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
@@ -58,7 +58,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type type, u16 vf_id);
+ enum ice_vsi_type vsi_type, u16 vf_id);
void ice_napi_del(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 3aa3fc37c70e..89c090d32bb2 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1518,7 +1518,7 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
- hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
+ hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
@@ -2054,8 +2054,16 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
}
if (oicr & PFINT_OICR_VFLR_M) {
- ena_mask &= ~PFINT_OICR_VFLR_M;
- set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ /* disable any further VFLR event notifications */
+ if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
+ u32 reg = rd32(hw, PFINT_OICR_ENA);
+
+ reg &= ~PFINT_OICR_VFLR_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ } else {
+ ena_mask &= ~PFINT_OICR_VFLR_M;
+ set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ }
}
if (oicr & PFINT_OICR_GRST_M) {
@@ -3087,30 +3095,22 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
* followed by an EUI-64 identifier (PCIe Device Serial Number)
*/
struct pci_dev *pdev = pf->pdev;
- char *opt_fw_filename = NULL;
- u32 dword;
- u8 dsn[8];
- int pos;
+ char *opt_fw_filename;
+ u64 dsn;
/* Determine the name of the optional file using the DSN (two
* dwords following the start of the DSN Capability).
*/
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (pos) {
- opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
- if (!opt_fw_filename)
- return NULL;
-
- pci_read_config_dword(pdev, pos + 4, &dword);
- put_unaligned_le32(dword, &dsn[0]);
- pci_read_config_dword(pdev, pos + 8, &dword);
- put_unaligned_le32(dword, &dsn[4]);
- snprintf(opt_fw_filename, NAME_MAX,
- "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg",
- ICE_DDP_PKG_PATH,
- dsn[7], dsn[6], dsn[5], dsn[4],
- dsn[3], dsn[2], dsn[1], dsn[0]);
- }
+ dsn = pci_get_dsn(pdev);
+ if (!dsn)
+ return NULL;
+
+ opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
+ if (!opt_fw_filename)
+ return NULL;
+
+ snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llX.pkg",
+ ICE_DDP_PKG_PATH, dsn);
return opt_fw_filename;
}
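Folding the two DSN config dwords into one u64 lets a single %016llX replace the old byte-by-byte spelling of dsn[7]..dsn[0]. A quick userspace check that the two forms agree; the DSN dwords and the path literal here are illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0x11223344, hi = 0x55667788;	/* example DSN dwords */
	uint64_t dsn = ((uint64_t)hi << 32) | lo;	/* what pci_get_dsn() yields */
	char name[64];

	snprintf(name, sizeof(name), "intel/ice/ddp/ice-%016" PRIX64 ".pkg", dsn);
	puts(name);	/* intel/ice/ddp/ice-5566778811223344.pkg */
	return 0;
}

The old code printed dsn[7]..dsn[0], i.e. the high dword's bytes first, which is exactly the byte order %016llX gives over the folded value.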
@@ -3388,11 +3388,14 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
+ set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
+ ice_free_vfs(pf);
+ }
+
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
- if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
- ice_free_vfs(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -5094,13 +5097,13 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
/* Read interrupt register */
val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
- netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
+ netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+ netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
pf->tx_timeout_recovery_level, txqueue);
switch (pf->tx_timeout_recovery_level) {
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 4d96abfd05d6..51825a203e35 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -578,7 +578,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
struct ice_aqc_get_sw_cfg_resp_elem *ele;
u16 pf_vf_num, swid, vsi_port_num;
bool is_vf = false;
- u8 type;
+ u8 res_type;
ele = rbuf[i].elements;
vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
@@ -593,16 +593,16 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
ICE_AQC_GET_SW_CONF_RESP_IS_VF)
is_vf = true;
- type = le16_to_cpu(ele->vsi_port_num) >>
+ res_type = le16_to_cpu(ele->vsi_port_num) >>
ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
- if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
+ if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
/* FW VSI is not needed. Just continue. */
continue;
}
ice_init_port_info(hw->port_info, vsi_port_num,
- type, swid, pf_vf_num, is_vf);
+ res_type, swid, pf_vf_num, is_vf);
}
} while (req_desc && !status);
@@ -958,7 +958,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_status status;
u16 s_rule_size;
- u16 type;
+ u16 rule_type;
int i;
if (!num_vsi)
@@ -970,11 +970,11 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
- type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
- ICE_AQC_SW_RULES_T_VSI_LIST_SET;
+ rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
+ ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
- type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
- ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
+ rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
+ ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
else
return ICE_ERR_PARAM;
@@ -992,7 +992,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
}
- s_rule->type = cpu_to_le16(type);
+ s_rule->type = cpu_to_le16(rule_type);
s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 6bedfa4676ae..15191a325918 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -99,8 +99,8 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
*/
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
- return (!bitmap_weight(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF) &&
- !bitmap_weight(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF));
+ return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
+ !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}
/**
@@ -170,7 +170,7 @@ static void ice_free_vf_res(struct ice_vf *vf)
vf->num_mac = 0;
}
- last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
+ last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
/* clear VF MDD event information */
memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
@@ -206,7 +206,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
first = vf->first_vector_idx;
- last = first + pf->num_vf_msix - 1;
+ last = first + pf->num_msix_per_vf - 1;
for (v = first; v <= last; v++) {
u32 reg;
@@ -232,11 +232,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
* ice_sriov_free_msix_res - Reset/free any used MSIX resources
* @pf: pointer to the PF structure
*
- * If MSIX entries from the pf->irq_tracker were needed then we need to
- * reset the irq_tracker->end and give back the entries we needed to
- * num_avail_sw_msix.
- *
- * If no MSIX entries were taken from the pf->irq_tracker then just clear
+ * Since no MSIX entries are taken from the pf->irq_tracker, just clear
* the pf->sriov_base_vector.
*
* Returns 0 on success, and -EINVAL on error.
@@ -253,11 +249,7 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
return -EINVAL;
/* give back irq_tracker resources used */
- if (pf->sriov_base_vector < res->num_entries) {
- res->end = res->num_entries;
- pf->num_avail_sw_msix +=
- res->num_entries - pf->sriov_base_vector;
- }
+ WARN_ON(pf->sriov_base_vector < res->num_entries);
pf->sriov_base_vector = 0;
@@ -271,8 +263,8 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
/* Clear Rx/Tx enabled queues flag */
- bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
- bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
+ bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
@@ -308,11 +300,6 @@ void ice_free_vfs(struct ice_pf *pf)
while (test_and_set_bit(__ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
- /* Avoid wait time by stopping all VFs at the same time */
- ice_for_each_vf(pf, i)
- if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
- ice_dis_vf_qs(&pf->vf[i]);
-
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
@@ -322,8 +309,13 @@ void ice_free_vfs(struct ice_pf *pf)
else
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
+ /* Avoid wait time by stopping all VFs at the same time */
+ ice_for_each_vf(pf, i)
+ if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
+ ice_dis_vf_qs(&pf->vf[i]);
+
tmp = pf->num_alloc_vfs;
- pf->num_vf_qps = 0;
+ pf->num_qps_per_vf = 0;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
@@ -511,7 +503,7 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
*/
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
- return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
+ return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}
/**
@@ -604,7 +596,7 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
*/
tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
ice_get_avail_rxq_count(pf));
- tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
+ tx_rx_queue_left += pf->num_qps_per_vf;
if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
vf->num_req_qs != vf->num_vf_qs)
vf->num_vf_qs = vf->num_req_qs;
@@ -650,9 +642,9 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
first = vf->first_vector_idx;
- last = (first + pf->num_vf_msix) - 1;
+ last = (first + pf->num_msix_per_vf) - 1;
abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
- abs_last = (abs_first + pf->num_vf_msix) - 1;
+ abs_last = (abs_first + pf->num_msix_per_vf) - 1;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
/* VF Vector allocation */
@@ -770,7 +762,7 @@ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
pf = vf->pf;
/* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
+ return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
q_vector->v_idx + 1;
}
@@ -803,127 +795,112 @@ static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
* @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
*
* This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so in many cases the irq_tracker will not
- * be needed. In these cases we just set the pf->sriov_base_vector and return
- * success.
+ * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
+ * just set the pf->sriov_base_vector and return success.
*
- * If SR-IOV needs to use any pf->irq_tracker entries it updates the
- * irq_tracker->end based on the first entry needed for SR-IOV. This makes it
- * so any calls to ice_get_res() using the irq_tracker will not try to use
- * resources at or beyond the newly set value.
+ * If there are not enough resources available, return an error. This should
+ * always be caught by ice_set_per_vf_res().
*
* Return 0 on success, and -EINVAL when there are not enough MSIX vectors
* in the PF's space available for SR-IOV.
*/
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
- int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
- u16 pf_total_msix_vectors =
- pf->hw.func_caps.common_cap.num_msix_vectors;
- struct ice_res_tracker *res = pf->irq_tracker;
+ u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+ int vectors_used = pf->irq_tracker->num_entries;
int sriov_base_vector;
- if (max_valid_res_idx < 0)
- return max_valid_res_idx;
-
- sriov_base_vector = pf_total_msix_vectors - num_msix_needed;
+ sriov_base_vector = total_vectors - num_msix_needed;
/* make sure we only grab irq_tracker entries from the list end and
* that we have enough available MSIX vectors
*/
- if (sriov_base_vector <= max_valid_res_idx)
+ if (sriov_base_vector < vectors_used)
return -EINVAL;
pf->sriov_base_vector = sriov_base_vector;
- /* dip into irq_tracker entries and update used resources */
- if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
- pf->num_avail_sw_msix -=
- res->num_entries - pf->sriov_base_vector;
- res->end = pf->sriov_base_vector;
- }
-
return 0;
}
/**
- * ice_check_avail_res - check if vectors and queues are available
+ * ice_set_per_vf_res - check if vectors and queues are available
* @pf: pointer to the PF structure
*
- * This function is where we calculate actual number of resources for VF VSIs,
- * we don't reserve ahead of time during probe. Returns success if vectors and
- * queues resources are available, otherwise returns error code
+ * First, determine HW interrupts from the common pool. If we allocate fewer
+ * VFs, we get more vectors and can enable more queues per VF. Note that this
+ * does not grab any vectors from the SW pool already allocated. Also note
+ * that all vector counts include one for each VF's miscellaneous interrupt
+ * vector (i.e. OICR).
+ *
+ * Minimum VFs - 2 vectors, 1 queue pair
+ * Small VFs - 5 vectors, 4 queue pairs
+ * Medium VFs - 17 vectors, 16 queue pairs
+ *
+ * Second, determine the number of queue pairs per VF by starting with a
+ * pre-defined maximum each VF supports. If this is not possible, then we
+ * adjust based on queue pairs available on the device.
+ *
+ * Lastly, set queue and MSI-X VF variables tracked by the PF so they can be
+ * used by each VF during VF initialization and reset.
*/
-static int ice_check_avail_res(struct ice_pf *pf)
+static int ice_set_per_vf_res(struct ice_pf *pf)
{
int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
- u16 num_msix, num_txq, num_rxq, num_avail_msix;
+ int msix_avail_per_vf, msix_avail_for_sriov;
struct device *dev = ice_pf_to_dev(pf);
+ u16 num_msix_per_vf, num_txq, num_rxq;
if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
return -EINVAL;
- /* add 1 to max_valid_res_idx to account for it being 0-based */
- num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
- (max_valid_res_idx + 1);
-
- /* Grab from HW interrupts common pool
- * Note: By the time the user decides it needs more vectors in a VF
- * its already too late since one must decide this prior to creating the
- * VF interface. So the best we can do is take a guess as to what the
- * user might want.
- *
- * We have two policies for vector allocation:
- * 1. if num_alloc_vfs is from 1 to 16, then we consider this as small
- * number of NFV VFs used for NFV appliances, since this is a special
- * case, we try to assign maximum vectors per VF (65) as much as
- * possible, based on determine_resources algorithm.
- * 2. if num_alloc_vfs is from 17 to 256, then its large number of
- * regular VFs which are not used for any special purpose. Hence try to
- * grab default interrupt vectors (5 as supported by AVF driver).
- */
- if (pf->num_alloc_vfs <= 16) {
- num_msix = ice_determine_res(pf, num_avail_msix,
- ICE_MAX_INTR_PER_VF,
- ICE_MIN_INTR_PER_VF);
- } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
- num_msix = ice_determine_res(pf, num_avail_msix,
- ICE_DFLT_INTR_PER_VF,
- ICE_MIN_INTR_PER_VF);
+ /* determine MSI-X resources per VF */
+ msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
+ pf->irq_tracker->num_entries;
+ msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
+ if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+ } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
+ num_msix_per_vf = ICE_MIN_INTR_PER_VF;
} else {
- dev_err(dev, "Number of VFs %d exceeds max VF count %d\n",
- pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
+ dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
+ msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
+ pf->num_alloc_vfs);
return -EIO;
}
- if (!num_msix)
- return -EIO;
-
- /* Grab from the common pool
- * start by requesting Default queues (4 as supported by AVF driver),
- * Note that, the main difference between queues and vectors is, latter
- * can only be reserved at init time but queues can be requested by VF
- * at runtime through Virtchnl, that is the reason we start by reserving
- * few queues.
- */
+ /* determine queue resources per VF */
num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
- ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
+ min_t(u16,
+ num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF),
+ ICE_MIN_QS_PER_VF);
num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
- ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
-
- if (!num_txq || !num_rxq)
+ min_t(u16,
+ num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF),
+ ICE_MIN_QS_PER_VF);
+
+ if (!num_txq || !num_rxq) {
+ dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
+ ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
return -EIO;
+ }
- if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
+ if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
+ dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
+ pf->num_alloc_vfs);
return -EINVAL;
+ }
- /* since AVF driver works with only queue pairs which means, it expects
- * to have equal number of Rx and Tx queues, so take the minimum of
- * available Tx or Rx queues
- */
- pf->num_vf_qps = min_t(int, num_txq, num_rxq);
- pf->num_vf_msix = num_msix;
+ /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
+ pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
+ pf->num_msix_per_vf = num_msix_per_vf;
+ dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
+ pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
return 0;
}
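A userspace sketch of the tiered policy described in the comment above: divide what is left above the irq_tracker evenly across VFs, snap down to the nearest supported tier, then carve the SR-IOV block from the top of the PF's vector space. The tier constants mirror the header; the PF totals are made-up example numbers:

#include <stdio.h>

#define MSIX_MED   17	/* ICE_NUM_VF_MSIX_MED   */
#define MSIX_SMALL  5	/* ICE_NUM_VF_MSIX_SMALL */
#define MSIX_MIN    2	/* ICE_MIN_INTR_PER_VF: 1 queue pair + OICR */

static int pick_msix_per_vf(int msix_avail_for_sriov, int num_vfs)
{
	int per_vf = msix_avail_for_sriov / num_vfs;

	if (per_vf >= MSIX_MED)
		return MSIX_MED;
	if (per_vf >= MSIX_SMALL)
		return MSIX_SMALL;
	if (per_vf >= MSIX_MIN)
		return MSIX_MIN;
	return -1;			/* below the supported minimum */
}

int main(void)
{
	int total = 256, tracker_used = 96;	/* example PF numbers */
	int num_vfs = 8;
	int per_vf = pick_msix_per_vf(total - tracker_used, num_vfs);

	if (per_vf < 0)
		return 1;
	/* the SR-IOV block is carved from the top of the PF's space and
	 * must not dip below the vectors the irq_tracker already owns */
	int base = total - per_vf * num_vfs;	/* 256 - 136 = 120 */

	printf("per_vf=%d base=%d ok=%d\n", per_vf, base,
	       base >= tracker_used);
	return 0;
}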
@@ -1032,7 +1009,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
int v;
- if (ice_check_avail_res(pf)) {
+ if (ice_set_per_vf_res(pf)) {
dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
return false;
}
@@ -1045,7 +1022,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, v) {
struct ice_vf *vf = &pf->vf[v];
- vf->num_vf_qs = pf->num_vf_qps;
+ vf->num_vf_qs = pf->num_qps_per_vf;
dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
vf->num_vf_qs);
ice_cleanup_and_realloc_vf(vf);
@@ -1178,7 +1155,8 @@ static bool ice_is_vf_disabled(struct ice_vf *vf)
* @vf: pointer to the VF structure
* @is_vflr: true if VFLR was issued, false if not
*
- * Returns true if the VF is reset, false otherwise.
+ * Returns true if the VF is currently in reset, resets successfully, or resets
+ * are disabled; false otherwise.
*/
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
@@ -1193,6 +1171,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev = ice_pf_to_dev(pf);
+ if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
+ dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
+ vf->vf_id);
+ return true;
+ }
+
if (ice_is_vf_disabled(vf)) {
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
vf->vf_id);
@@ -1445,7 +1429,7 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
if (num_vfs > pf->num_vfs_supported) {
dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
num_vfs, pf->num_vfs_supported);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
dev_info(dev, "Allocating %d VFs\n", num_vfs);
@@ -1754,7 +1738,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
- vfres->max_vectors = pf->num_vf_msix;
+ vfres->max_vectors = pf->num_msix_per_vf;
vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
@@ -2126,8 +2110,8 @@ error_param:
static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
{
if ((!vqs->rx_queues && !vqs->tx_queues) ||
- vqs->rx_queues >= BIT(ICE_MAX_BASE_QS_PER_VF) ||
- vqs->tx_queues >= BIT(ICE_MAX_BASE_QS_PER_VF))
+ vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
+ vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
return false;
return true;
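Worth noting why the single comparison above doubles as a bounds check: a queue bitmap with any bit at index ICE_MAX_RSS_QS_PER_VF or above set compares greater than or equal to BIT(ICE_MAX_RSS_QS_PER_VF), so one unsigned compare rejects every out-of-range queue. A small demo of the same idea:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))
#define MAX_RSS_QS_PER_VF 16

static bool vqs_bitmaps_valid(unsigned long rxq, unsigned long txq)
{
	if ((!rxq && !txq) ||
	    rxq >= BIT(MAX_RSS_QS_PER_VF) || txq >= BIT(MAX_RSS_QS_PER_VF))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", vqs_bitmaps_valid(BIT(3) | BIT(0), 0)); /* 1: queues 0, 3 */
	printf("%d\n", vqs_bitmaps_valid(BIT(16), 0));          /* 0: queue 16 */
	return 0;
}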
@@ -2176,7 +2160,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
* programmed using ice_vsi_cfg_txqs
*/
q_map = vqs->rx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2198,7 +2182,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
vsi = pf->vsi[vf->lan_vsi_idx];
q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2255,12 +2239,6 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
- vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2270,7 +2248,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
if (vqs->tx_queues) {
q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
struct ice_ring *ring = vsi->tx_rings[vf_q_id];
struct ice_txq_meta txq_meta = { 0 };
@@ -2301,7 +2279,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->rx_queues;
/* speed up Rx queue disable by batching them if possible */
if (q_map &&
- bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF)) {
+ bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
if (ice_vsi_stop_all_rx_rings(vsi)) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
vsi->vsi_num);
@@ -2309,9 +2287,9 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
} else if (q_map) {
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2345,6 +2323,57 @@ error_param:
}
/**
+ * ice_cfg_interrupt - configure the IRQ to queue map
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @vector_id: vector ID
+ * @map: vector map for mapping vectors to queues
+ * @q_vector: structure for interrupt vector
+ */
+static int
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+ struct virtchnl_vector_map *map,
+ struct ice_q_vector *q_vector)
+{
+ u16 vsi_q_id, vsi_q_id_idx;
+ unsigned long qmap;
+
+ q_vector->num_ring_rx = 0;
+ q_vector->num_ring_tx = 0;
+
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->rx.itr_idx);
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->tx.itr_idx);
+ }
+
+ return VIRTCHNL_STATUS_SUCCESS;
+}
+
+/**
* ice_vc_cfg_irq_map_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
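The ice_cfg_interrupt() helper added above factors out two identical for_each_set_bit() walks over the virtchnl Rx and Tx queue maps. A portable stand-in for that walk, with a made-up map, just to make the iteration explicit:

#include <stdio.h>

#define MAX_RSS_QS_PER_VF 16

int main(void)
{
	unsigned long qmap = 0xB;	/* queues 0, 1 and 3 requested */

	for (unsigned int q = 0; q < MAX_RSS_QS_PER_VF; q++) {
		if (!(qmap & (1UL << q)))
			continue;
		/* the driver validates q here, then wires ring q to the
		 * vector and programs its ITR index */
		printf("map queue %u to this vector\n", q);
	}
	return 0;
}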
@@ -2354,13 +2383,11 @@ error_param:
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u16 num_q_vectors_mapped, vsi_id, vector_id;
struct virtchnl_irq_map_info *irqmap_info;
- u16 vsi_id, vsi_q_id, vector_id;
struct virtchnl_vector_map *map;
struct ice_pf *pf = vf->pf;
- u16 num_q_vectors_mapped;
struct ice_vsi *vsi;
- unsigned long qmap;
int i;
irqmap_info = (struct virtchnl_irq_map_info *)msg;
@@ -2371,8 +2398,8 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
* there is actually at least a single VF queue vector mapped
*/
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->num_vf_msix < num_q_vectors_mapped ||
- !irqmap_info->num_vectors) {
+ pf->num_msix_per_vf < num_q_vectors_mapped ||
+ !num_q_vectors_mapped) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2393,7 +2420,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* vector_id is always 0-based for each VF, and can never be
* larger than or equal to the max allowed interrupts per VF
*/
- if (!(vector_id < ICE_MAX_INTR_PER_VF) ||
+ if (!(vector_id < pf->num_msix_per_vf) ||
!ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2414,33 +2441,10 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
}
/* reject any invalid queue index */
- qmap = map->rxq_map;
- q_vector->num_ring_rx = 0;
- for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- q_vector->num_ring_rx++;
- q_vector->rx.itr_idx = map->rxitr_idx;
- vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->rx.itr_idx);
- }
-
- qmap = map->txq_map;
- q_vector->num_ring_tx = 0;
- for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- q_vector->num_ring_tx++;
- q_vector->tx.itr_idx = map->txitr_idx;
- vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->tx.itr_idx);
- }
+ v_ret = (enum virtchnl_status_code)
+ ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
+ if (v_ret)
+ goto error_param;
}
error_param:
@@ -2483,7 +2487,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
+ if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
@@ -2790,16 +2794,16 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
if (!req_queues) {
dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
vf->vf_id);
- } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
+ } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
dev_err(dev, "VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
+ vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
} else if (req_queues > cur_queues &&
req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
- ICE_MAX_BASE_QS_PER_VF);
+ ICE_MAX_RSS_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 36dad0eba3db..3f9464269bd2 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -21,18 +21,15 @@
#define ICE_PCI_CIAD_WAIT_COUNT 100
#define ICE_PCI_CIAD_WAIT_DELAY_US 1
-/* VF resources default values and limitation */
+/* VF resource constraints */
#define ICE_MAX_VF_COUNT 256
-#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
-#define ICE_DFLT_QS_PER_VF 4
#define ICE_NONQ_VECS_VF 1
#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_BASE_QS_PER_VF 16
-#define ICE_MAX_INTR_PER_VF 65
-#define ICE_MAX_POLICY_INTR_PER_VF 33
+#define ICE_MAX_RSS_QS_PER_VF 16
+#define ICE_NUM_VF_MSIX_MED 17
+#define ICE_NUM_VF_MSIX_SMALL 5
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_TRIES 40
#define ICE_MAX_VF_RESET_SLEEP_MS 20
@@ -75,8 +72,8 @@ struct ice_vf {
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
struct virtchnl_ether_addr dflt_lan_addr;
- DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF);
- DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF);
+ DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
u16 port_vlan_info; /* Port VLAN ID and QoS */
u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
u8 trusted:1;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 3479e1de98fe..8a4ba7c6d549 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -24,7 +24,7 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
struct xdp_umem __always_unused *umem,
u16 __always_unused qid)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void
@@ -63,7 +63,7 @@ static inline int
ice_xsk_wakeup(struct net_device __always_unused *netdev,
u32 __always_unused queue_id, u32 __always_unused flags)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f96ffa83efbe..39d3b76a6f5d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2183,27 +2183,6 @@ static int igb_set_coalesce(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
- if (ec->rx_max_coalesced_frames ||
- ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_max_coalesced_frames ||
- ec->tx_coalesce_usecs_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_rx_coalesce ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -ENOTSUPP;
-
if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
@@ -3477,6 +3456,7 @@ static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops igb_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = igb_get_drvinfo,
.get_regs_len = igb_get_regs_len,
.get_regs = igb_get_regs,
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 3ae358b35227..9217d150e286 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -424,6 +424,7 @@ static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops igbvf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = igbvf_get_drvinfo,
.get_regs_len = igbvf_get_regs_len,
.get_regs = igbvf_get_regs,
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 69f50b8e2af3..f530fc29b074 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -861,27 +861,6 @@ static int igc_set_coalesce(struct net_device *netdev,
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
- if (ec->rx_max_coalesced_frames ||
- ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_max_coalesced_frames ||
- ec->tx_coalesce_usecs_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_rx_coalesce ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -ENOTSUPP;
-
if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS ||
(ec->rx_coalesce_usecs > 3 &&
ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
@@ -1915,6 +1894,7 @@ static int igc_set_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops igc_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = igc_get_drvinfo,
.get_regs_len = igc_get_regs_len,
.get_regs = igc_get_regs,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 7c52ae8ac005..c6bf0a50ee63 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3444,6 +3444,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ixgbe_get_drvinfo,
.get_regs_len = ixgbe_get_regs_len,
.get_regs = ixgbe_get_regs,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ccd852ad62a4..ec7a11d13fdc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -968,8 +968,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- int i, pos;
- u8 buf[8];
+ u64 dsn;
if (!info)
return -EINVAL;
@@ -985,17 +984,11 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
/* Serial Number */
/* Get the PCI-e Device Serial Number Capability */
- pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
- if (pos) {
- pos += 4;
- for (i = 0; i < 8; i++)
- pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);
-
+ dsn = pci_get_dsn(adapter->pdev);
+ if (dsn)
snprintf(info->serial_number, sizeof(info->serial_number),
- "%02X%02X%02X%02X%02X%02X%02X%02X",
- buf[7], buf[6], buf[5], buf[4],
- buf[3], buf[2], buf[1], buf[0]);
- } else
+ "%016llX", dsn);
+ else
snprintf(info->serial_number, sizeof(info->serial_number),
"Unknown");
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index f7f309c96fa8..988fa49fa99a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -968,6 +968,7 @@ static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
.get_regs = ixgbevf_get_regs,
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 0b9e851f3da4..d2e2dc538428 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -347,7 +347,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
}
- dev->err_interrupt = platform_get_irq(pdev, 0);
+ dev->err_interrupt = platform_get_irq_optional(pdev, 0);
if (dev->err_interrupt > 0 &&
resource_size(r) < MVMDIO_ERR_INT_MASK + 4) {
dev_err(&pdev->dev,
@@ -364,8 +364,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
writel(MVMDIO_ERR_INT_SMI_DONE,
dev->regs + MVMDIO_ERR_INT_MASK);
- } else if (dev->err_interrupt == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ } else if (dev->err_interrupt < 0) {
+ ret = dev->err_interrupt;
goto out_mdio;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b22eeb5f8700..bc488e8b8e45 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -344,8 +344,10 @@ enum {
ETHTOOL_XDP_REDIRECT,
ETHTOOL_XDP_PASS,
ETHTOOL_XDP_DROP,
- ETHTOOL_XDP_XMIT,
ETHTOOL_XDP_TX,
+ ETHTOOL_XDP_TX_ERR,
+ ETHTOOL_XDP_XMIT,
+ ETHTOOL_XDP_XMIT_ERR,
ETHTOOL_MAX_STATS,
};
@@ -404,7 +406,9 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
+ { ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
+ { ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
};
struct mvneta_stats {
@@ -417,7 +421,9 @@ struct mvneta_stats {
u64 xdp_pass;
u64 xdp_drop;
u64 xdp_xmit;
+ u64 xdp_xmit_err;
u64 xdp_tx;
+ u64 xdp_tx_err;
};
struct mvneta_ethtool_stats {
@@ -2059,6 +2065,7 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
static int
mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
struct xdp_frame *xdpf;
@@ -2076,8 +2083,6 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
__netif_tx_lock(nq, cpu);
ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
if (ret == MVNETA_XDP_TX) {
- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
-
u64_stats_update_begin(&stats->syncp);
stats->es.ps.tx_bytes += xdpf->len;
stats->es.ps.tx_packets++;
@@ -2085,6 +2090,10 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
u64_stats_update_end(&stats->syncp);
mvneta_txq_pend_desc_add(pp, txq, 0);
+ } else {
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.xdp_tx_err++;
+ u64_stats_update_end(&stats->syncp);
}
__netif_tx_unlock(nq);
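The hunk above hoists the per-cpu stats pointer so a failed XDP_TX submit can bump the new xdp_tx_err counter instead of vanishing. A minimal model of the accounting split; the kernel additionally brackets each update with u64_stats_update_begin()/end() on the per-cpu syncp, which this sketch omits:

#include <stdbool.h>
#include <stdio.h>

struct ps_counters { unsigned long tx_packets, xdp_tx, xdp_tx_err; };

/* one submit attempt: success feeds the tx counters, failure now
 * lands in xdp_tx_err */
static void account_xdp_tx(struct ps_counters *s, bool submitted)
{
	if (submitted) {
		s->tx_packets++;
		s->xdp_tx++;
	} else {
		s->xdp_tx_err++;
	}
}

int main(void)
{
	struct ps_counters s = { 0 };

	account_xdp_tx(&s, true);
	account_xdp_tx(&s, false);
	printf("tx=%lu tx_err=%lu\n", s.xdp_tx, s.xdp_tx_err); /* tx=1 tx_err=1 */
	return 0;
}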
@@ -2128,6 +2137,7 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
stats->es.ps.tx_bytes += nxmit_byte;
stats->es.ps.tx_packets += nxmit;
stats->es.ps.xdp_xmit += nxmit;
+ stats->es.ps.xdp_xmit_err += num_frame - nxmit;
u64_stats_update_end(&stats->syncp);
return nxmit;
@@ -2152,7 +2162,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
int err;
err = xdp_do_redirect(pp->dev, xdp, prog);
- if (err) {
+ if (unlikely(err)) {
ret = MVNETA_XDP_DROPPED;
page_pool_put_page(rxq->page_pool,
virt_to_head_page(xdp->data), len,
@@ -4518,6 +4528,8 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
u64 skb_alloc_error;
u64 refill_error;
u64 xdp_redirect;
+ u64 xdp_xmit_err;
+ u64 xdp_tx_err;
u64 xdp_pass;
u64 xdp_drop;
u64 xdp_xmit;
@@ -4532,7 +4544,9 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
xdp_pass = stats->es.ps.xdp_pass;
xdp_drop = stats->es.ps.xdp_drop;
xdp_xmit = stats->es.ps.xdp_xmit;
+ xdp_xmit_err = stats->es.ps.xdp_xmit_err;
xdp_tx = stats->es.ps.xdp_tx;
+ xdp_tx_err = stats->es.ps.xdp_tx_err;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
es->skb_alloc_error += skb_alloc_error;
@@ -4541,7 +4555,9 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
es->ps.xdp_pass += xdp_pass;
es->ps.xdp_drop += xdp_drop;
es->ps.xdp_xmit += xdp_xmit;
+ es->ps.xdp_xmit_err += xdp_xmit_err;
es->ps.xdp_tx += xdp_tx;
+ es->ps.xdp_tx_err += xdp_tx_err;
}
}
@@ -4594,9 +4610,15 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
case ETHTOOL_XDP_TX:
pp->ethtool_stats[i] = stats.ps.xdp_tx;
break;
+ case ETHTOOL_XDP_TX_ERR:
+ pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
+ break;
case ETHTOOL_XDP_XMIT:
pp->ethtool_stats[i] = stats.ps.xdp_xmit;
break;
+ case ETHTOOL_XDP_XMIT_ERR:
+ pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
+ break;
}
break;
}
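
The new rx_xdp_tx_errors / tx_xdp_xmit_errors counters ride on mvneta's existing per-CPU stats, which are protected by the u64_stats seqcount protocol rather than a lock: the writer brackets updates with u64_stats_update_begin()/end() (as in the hunks above), and the reader retries its snapshot if a writer raced with it (as in mvneta_ethtool_update_pcpu_stats()). A minimal sketch of that pairing, with hypothetical names:

#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	u64 xdp_tx_err;			/* counter updated in napi context */
	struct u64_stats_sync syncp;	/* seqcount protecting the counter */
};

/* Writer side: a no-op on 64-bit, a seqcount bump on 32-bit */
static void demo_count_tx_err(struct demo_pcpu_stats *stats)
{
	u64_stats_update_begin(&stats->syncp);
	stats->xdp_tx_err++;
	u64_stats_update_end(&stats->syncp);
}

/* Reader side: retry the snapshot if a writer was active meanwhile */
static u64 demo_read_tx_err(struct demo_pcpu_stats *stats)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		val = stats->xdp_tx_err;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

	return val;
}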
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 35478cba2aa5..0a0c6ec2336c 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1082,6 +1082,9 @@ static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
u8 qh, ql, pmap;
int index, ctx;
+ if (!flow_action_basic_hw_stats_types_check(&rule->flow->action, NULL))
+ return -EOPNOTSUPP;
+
memset(&c2, 0, sizeof(c2));
index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
@@ -1305,6 +1308,9 @@ static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
struct flow_rule *flow = rule->flow;
struct flow_action_entry *act;
+ if (!flow_action_basic_hw_stats_types_check(&rule->flow->action, NULL))
+ return -EOPNOTSUPP;
+
act = &flow->action.entries[0];
if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 9f5b7229574e..a4e65da8d95b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -367,6 +367,107 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
+int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
+ return 0;
+}
+
+int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+ return 0;
+}
+
+static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
+{
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return;
+ if (enable) {
+ /* Enable receive pause frames */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Enable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ /* Set pause time and interval */
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
+ DEFAULT_PAUSE_TIME);
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFULL;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME / 2));
+
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
+ DEFAULT_PAUSE_TIME);
+
+ cfg = cgx_read(cgx, lmac_id,
+ CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFULL;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME / 2));
+ } else {
+ /* ALL pause frames received are completely ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ }
+}
+
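For context on the constants programmed above: DEFAULT_PAUSE_TIME (0x7FF) is presumably expressed in IEEE 802.3x pause quanta of 512 bit times each, and the interval registers get half that value so a refresh pause frame goes out before the peer's pause timer expires. Rough numbers under that assumption:

/* Assuming IEEE 802.3x pause quanta (512 bit times per quantum):
 *   pause_time = 0x7FF = 2047 quanta
 *   at 10 Gb/s: 2047 * 512 / (10 * 10^9) s ~= 104.8 us per pause frame
 *   interval   = 2047 / 2 = 1023 quanta, i.e. refresh at half the pause time
 */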
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
@@ -544,59 +645,6 @@ static inline bool cgx_event_is_linkevent(u64 event)
return false;
}
-static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
- struct cgx *cgx)
-{
- u64 req = 0;
- u64 resp;
- int err;
-
- req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
- err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
- if (!err)
- *prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);
-
- return err;
-}
-
-static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
- struct cgx *cgx)
-{
- u64 req = 0;
- u64 resp;
- int err;
-
- req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
- err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
- if (!err)
- *prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);
-
- return err;
-}
-
-int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
-{
- struct cgx *cgx_dev;
- int err;
-
- if (!addr || !size)
- return -EINVAL;
-
- cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list);
- if (!cgx_dev)
- return -ENXIO;
-
- err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
- if (err)
- return -EIO;
-
- err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
- if (err)
- return -EIO;
-
- return 0;
-}
-
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
struct lmac *lmac = data;
@@ -680,6 +728,24 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
return 0;
}
+int cgx_get_fwdata_base(u64 *base)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int err;
+
+ cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
+ if (!cgx)
+ return -ENXIO;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ if (!err)
+ *base = FIELD_GET(RESP_FWD_BASE, resp);
+
+ return err;
+}
+
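cgx_get_fwdata_base() follows the same firmware-command pattern as the rest of this file: pack a command ID into a 64-bit scratch-register word, issue it, then pull a field back out of the response word. The driver's FIELD_SET is a local helper from cgx_fw_if.h; the equivalent with the generic <linux/bitfield.h> macros looks like the sketch below (the mask layouts here are illustrative, not the real register format):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_CMD_ID_MASK	GENMASK_ULL(5, 2)	/* hypothetical */
#define DEMO_RESP_BASE_MASK	GENMASK_ULL(56, 9)	/* hypothetical */

static u64 demo_build_req(u8 cmd_id)
{
	/* Pack the command ID into the request word */
	return FIELD_PREP(DEMO_CMD_ID_MASK, cmd_id);
}

static u64 demo_parse_base(u64 resp)
{
	/* Extract the base-address field from the response word */
	return FIELD_GET(DEMO_RESP_BASE_MASK, resp);
}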
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
u64 req = 0;
@@ -787,6 +853,7 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Add reference */
cgx->lmac_idmap[i] = lmac;
+ cgx_lmac_pause_frm_config(cgx, i, true);
}
return cgx_lmac_verify_fwi_version(cgx);
@@ -805,6 +872,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
/* Free all lmac related resources */
for (i = 0; i < cgx->lmac_count; i++) {
+ cgx_lmac_pause_frm_config(cgx, i, false);
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 9343bf39cfac..394f96591feb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -60,10 +60,20 @@
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGXX_SMUX_TX_CTL 0x20178
+#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
+#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
+#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
+#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
+#define CGXX_CMR_RX_OVR_BP 0x130
+#define CGX_CMR_RX_OVR_BP_EN(X) BIT_ULL(((X) + 8))
+#define CGX_CMR_RX_OVR_BP_BP(X) BIT_ULL(((X) + 4))
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
+#define DEFAULT_PAUSE_TIME 0x7FF
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
@@ -124,5 +134,9 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
-int cgx_get_mkex_prfl_info(u64 *addr, u64 *size);
+int cgx_get_fwdata_base(u64 *base);
+int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index 473d9751601f..c3702fa58b6b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -79,7 +79,8 @@ enum cgx_cmd_id {
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
CGX_CMD_GET_MKEX_PRFL_SIZE,
- CGX_CMD_GET_MKEX_PRFL_ADDR
+ CGX_CMD_GET_MKEX_PRFL_ADDR,
+ CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
};
/* async event ids */
@@ -149,6 +150,11 @@ enum cgx_cmd_own {
*/
#define RESP_MKEX_PRFL_ADDR GENMASK_ULL(63, 9)
+/* Response to cmd ID as CGX_CMD_GET_FWD_BASE with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_FWD_BASE GENMASK_ULL(56, 9)
+
/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
* status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
*
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 8bbc1f1d81f5..6dfd0f90cd70 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -125,7 +125,7 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
-M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
+M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
@@ -143,6 +143,8 @@ M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
+ cgx_pause_frm_cfg) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -211,6 +213,9 @@ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
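
Both new entries hook into mbox.h's M() X-macro table: the header expands the table once with M() defined to emit message IDs and again to emit handler prototypes, so one added row wires up both. A stripped-down sketch of the technique (entries abbreviated, names illustrative):

/* X-macro sketch: one table, expanded twice with different M() bodies */
#define DEMO_MESSAGES(M)			\
M(READY,             0x001)			\
M(CGX_CFG_PAUSE_FRM, 0x20E)			\
M(NIX_BP_ENABLE,     0x8016)

enum {					/* first expansion: message IDs */
#define M(name, id) DEMO_MSG_##name = id,
DEMO_MESSAGES(M)
#undef M
};

#define M(name, id) void demo_handle_##name(void);
DEMO_MESSAGES(M)			/* second expansion: prototypes */
#undef M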
@@ -251,7 +256,8 @@ enum rvu_af_status {
struct ready_msg_rsp {
struct mbox_msghdr hdr;
- u16 sclk_feq; /* SCLK frequency */
+ u16 sclk_freq; /* SCLK frequency (in MHz) */
+ u16 rclk_freq; /* RCLK frequency (in MHz) */
};
/* Structure for requesting resource provisioning.
@@ -342,6 +348,15 @@ struct cgx_link_info_msg {
struct cgx_link_user_info link_info;
};
+struct cgx_pause_frm_cfg {
+ struct mbox_msghdr hdr;
+ u8 set;
+ /* set = 1 if the request is to configure pause frames */
+ /* set = 0 if the request is to fetch the current pause frame config */
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -676,6 +691,25 @@ struct nix_lso_format_cfg_rsp {
u8 lso_format_idx;
};
+struct nix_bp_cfg_req {
+ struct mbox_msghdr hdr;
+ u16 chan_base; /* Starting channel number */
+ u8 chan_cnt; /* Number of channels */
+ u8 bpid_per_chan;
+ /* bpid_per_chan = 0 assigns a single bpid to the whole range of channels */
+ /* bpid_per_chan = 1 assigns a separate bpid to each channel */
+};
+
+/* PF can be mapped to either CGX or LBK interface,
+ * so maximum 64 channels are possible.
+ */
+#define NIX_MAX_BPID_CHAN 64
+struct nix_bp_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
+ u8 chan_cnt; /* Number of channels for which bpids are assigned */
+};
+
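Each chan_bpid[] entry is a packed u16: the channel offset lives above bit 10 and the bpid in bits 9:0, matching the encode in rvu_mbox_handler_nix_bp_enable() and the decode in the PF driver's mbox_handler_nix_bp_enable() further down in this series. A self-contained sketch of that layout (with NIX_MAX_BPID_CHAN at 64 channels, the value always fits):

/* chan_bpid[] layout: channel offset above bit 10, bpid in bits 9:0 */
static inline u16 demo_chan_bpid_pack(u8 chan, u16 bpid)
{
	return ((chan & 0x7F) << 10) | (bpid & 0x3FF);
}

static inline u8 demo_chan_bpid_chan(u16 entry)
{
	return (entry >> 10) & 0x7F;
}

static inline u16 demo_chan_bpid_bpid(u16 entry)
{
	return entry & 0x3FF;
}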
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 5c190c3ce898..5ff25bf8419e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -88,13 +88,15 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
u64 reg_val;
reg = rvu->afreg_base + ((block << 28) | offset);
- while (time_before(jiffies, timeout)) {
- reg_val = readq(reg);
- if (zero && !(reg_val & mask))
- return 0;
- if (!zero && (reg_val & mask))
- return 0;
+again:
+ reg_val = readq(reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ if (time_before(jiffies, timeout)) {
usleep_range(1, 5);
+ goto again;
}
return -EBUSY;
}
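
The rewrite matters when the caller is scheduled out past the deadline before its first sample: the old loop would then return -EBUSY without ever reading the register, while the new one always reads at least once and only afterwards checks the clock. The generic iopoll helpers implement the same "sample once more after expiry" semantics; a sketch with illustrative names, assuming a 64-bit build where readq() exists:

#include <linux/iopoll.h>

/* Poll every ~5 us, give up after 10 ms; readq_poll_timeout() reads the
 * register one final time after the timeout expires, so a preempted
 * caller cannot miss a bit that was already set.
 */
static int demo_poll_reg(void __iomem *reg, u64 mask)
{
	u64 val;

	return readq_poll_timeout(reg, val, (val & mask), 5, 10000);
}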
@@ -421,6 +423,19 @@ static void rvu_check_block_implemented(struct rvu *rvu)
}
}
+static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
+ RVU_BLK_RVUM_REVID);
+}
+
+static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
+}
+
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
int err;
@@ -603,7 +618,11 @@ setup_vfmsix:
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
max_msix = cfg & 0xFFFFF;
- phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+ if (rvu->fwdata && rvu->fwdata->msixtr_base)
+ phy_addr = rvu->fwdata->msixtr_base;
+ else
+ phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+
iova = dma_map_resource(rvu->dev, phy_addr,
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
@@ -613,10 +632,18 @@ setup_vfmsix:
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
rvu->msix_base_iova = iova;
+ rvu->msixtr_base_phy = phy_addr;
return 0;
}
+static void rvu_reset_msix(struct rvu *rvu)
+{
+ /* Restore msixtr base register */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
+ rvu->msixtr_base_phy);
+}
+
static void rvu_free_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -655,9 +682,80 @@ static void rvu_free_hw_resources(struct rvu *rvu)
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
+ rvu_reset_msix(rvu);
mutex_destroy(&rvu->rsrc_lock);
}
+static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf;
+ struct rvu_pfvf *pfvf;
+ u64 *mac;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+ /* Assign MAC address to PF */
+ pfvf = &rvu->pf[pf];
+ if (rvu->fwdata && pf < PF_MACNUM_MAX) {
+ mac = &rvu->fwdata->pf_macs[pf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+
+ /* Assign MAC address to VFs */
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ pfvf = &rvu->hwvf[hwvf];
+ if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
+ mac = &rvu->fwdata->vf_macs[hwvf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+ }
+ }
+}
+
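fwdata stores each MAC in the low 48 bits of a u64; u64_to_ether_addr() from <linux/etherdevice.h> expands it MSB-first into the 6-byte array, and eth_random_addr() is the stock fallback (it generates a locally administered unicast address). A minimal sketch of the conversion, names hypothetical:

#include <linux/etherdevice.h>

/* Sketch: 0x0000AABBCCDDEEFF becomes AA:BB:CC:DD:EE:FF */
static void demo_mac_from_u64(u64 fw_mac, u8 addr[ETH_ALEN])
{
	if (fw_mac)
		u64_to_ether_addr(fw_mac, addr);
	else
		eth_random_addr(addr);	/* locally administered, unicast */
}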
+static int rvu_fwdata_init(struct rvu *rvu)
+{
+ u64 fwdbase;
+ int err;
+
+ /* Get firmware data base address */
+ err = cgx_get_fwdata_base(&fwdbase);
+ if (err)
+ goto fail;
+ rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
+ if (!rvu->fwdata)
+ goto fail;
+ if (!is_rvu_fwdata_valid(rvu)) {
+ dev_err(rvu->dev,
+ "Mismatch in 'fwdata' struct btw kernel and firmware\n");
+ iounmap(rvu->fwdata);
+ rvu->fwdata = NULL;
+ return -EINVAL;
+ }
+ return 0;
+fail:
+ dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
+ return -EIO;
+}
+
+static void rvu_fwdata_exit(struct rvu *rvu)
+{
+ if (rvu->fwdata)
+ iounmap(rvu->fwdata);
+}
+
static int rvu_setup_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -813,6 +911,8 @@ init:
mutex_init(&rvu->rsrc_lock);
+ rvu_fwdata_init(rvu);
+
err = rvu_setup_msix_resources(rvu);
if (err)
return err;
@@ -825,8 +925,10 @@ init:
/* Allocate memory for block LF/slot to pcifunc mapping info */
block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
sizeof(u16), GFP_KERNEL);
- if (!block->fn_map)
- return -ENOMEM;
+ if (!block->fn_map) {
+ err = -ENOMEM;
+ goto msix_err;
+ }
/* Scan all blocks to check if low level firmware has
* already provisioned any of the resources to a PF/VF.
@@ -836,25 +938,36 @@ init:
err = rvu_npc_init(rvu);
if (err)
- goto exit;
+ goto npc_err;
err = rvu_cgx_init(rvu);
if (err)
- goto exit;
+ goto cgx_err;
+
+ /* Assign MACs for CGX mapped functions */
+ rvu_setup_pfvf_macaddress(rvu);
err = rvu_npa_init(rvu);
if (err)
- goto cgx_err;
+ goto npa_err;
err = rvu_nix_init(rvu);
if (err)
- goto cgx_err;
+ goto nix_err;
return 0;
+nix_err:
+ rvu_nix_freemem(rvu);
+npa_err:
+ rvu_npa_freemem(rvu);
cgx_err:
rvu_cgx_exit(rvu);
-exit:
+npc_err:
+ rvu_npc_freemem(rvu);
+ rvu_fwdata_exit(rvu);
+msix_err:
+ rvu_reset_msix(rvu);
return err;
}
@@ -901,6 +1014,10 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
struct ready_msg_rsp *rsp)
{
+ if (rvu->fwdata) {
+ rsp->rclk_freq = rvu->fwdata->rclk;
+ rsp->sclk_freq = rvu->fwdata->sclk;
+ }
return 0;
}
@@ -2128,6 +2245,9 @@ static int rvu_register_interrupts(struct rvu *rvu)
}
rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+ /* Clear TRPEND bit for all PF */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
/* Enable ME interrupt for all PFs*/
rvu_write64(rvu, BLKADDR_RVUM,
RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
@@ -2439,17 +2559,13 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_disable_device;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
- dev_err(dev, "Unable to set DMA mask\n");
+ dev_err(dev, "DMA mask config failed, abort\n");
goto err_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
- if (err) {
- dev_err(dev, "Unable to set consistent DMA mask\n");
- goto err_release_regions;
- }
+ pci_set_master(pdev);
/* Map Admin function CSRs */
rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
@@ -2489,6 +2605,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_flr;
+ rvu_setup_rvum_blk_revid(rvu);
+
/* Enable AF's VFs (if any) */
err = rvu_enable_sriov(rvu);
if (err)
@@ -2506,8 +2624,10 @@ err_mbox:
rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
+ rvu_clear_rvum_blk_revid(rvu);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@@ -2527,11 +2647,12 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
rvu_mbox_destroy(&rvu->afpf_wq_info);
rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
-
+ rvu_clear_rvum_blk_revid(rvu);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 7afb7caad873..dcf25a092008 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -269,6 +269,26 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
+struct rvu_fwdata {
+#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
+#define RVU_FWDATA_VERSION 0x0001
+ u32 header_magic;
+ u32 version; /* version id */
+
+ /* MAC address */
+#define PF_MACNUM_MAX 32
+#define VF_MACNUM_MAX 256
+ u64 pf_macs[PF_MACNUM_MAX];
+ u64 vf_macs[VF_MACNUM_MAX];
+ u64 sclk;
+ u64 rclk;
+ u64 mcam_addr;
+ u64 mcam_sz;
+ u64 msixtr_base;
+#define FWDATA_RESERVED_MEM 1023
+ u64 reserved[FWDATA_RESERVED_MEM];
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -294,6 +314,7 @@ struct rvu {
char *irq_name;
bool *irq_allocated;
dma_addr_t msix_base_iova;
+ u64 msixtr_base_phy; /* Register reset value */
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
@@ -313,6 +334,9 @@ struct rvu {
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+ /* Firmware data */
+ struct rvu_fwdata *fwdata;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -363,6 +387,12 @@ static inline int is_afvf(u16 pcifunc)
return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}
+static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
+{
+ return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) &&
+ (rvu->fwdata->version == RVU_FWDATA_VERSION);
+}
+
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index b8e8f337316f..f3c82e489897 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -590,6 +590,30 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
+ struct cgx_pause_frm_cfg *req,
+ struct cgx_pause_frm_cfg *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PFs/VFs that are mapped to CGX LMACs;
+ * if it is received from any other PF/VF, simply ACK it, as there is
+ * nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (req->set)
+ cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ req->tx_pause, req->rx_pause);
+ else
+ cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &rsp->tx_pause, &rsp->rx_pause);
+ return 0;
+}
+
/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
* from its VFs as well. ie. NIX rx/tx counters at the CGX port level
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a29e5c7c8cfc..36953d4f51c7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -18,6 +18,8 @@
#include "cgx.h"
static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+ int type, int chan_id);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -211,6 +213,11 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->tx_chan_cnt = 1;
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
+
+ /* By default we enable pause frames */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
+ cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -273,6 +280,142 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
}
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, pf, type;
+ u16 chan_base, chan;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+ chan_base = pfvf->rx_chan_base + req->chan_base;
+ for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ cfg & ~BIT_ULL(16));
+ }
+ return 0;
+}
+
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+ int type, int chan_id)
+{
+ int bpid, blkaddr, lmac_chan_cnt;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 cgx_bpid_cnt, lbk_bpid_cnt;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ lmac_chan_cnt = cfg & 0xFF;
+
+ cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
+ lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ /* Backpressure ID range division:
+ * CGX channels are mapped to (0 - 191) BPIDs
+ * LBK channels are mapped to (192 - 255) BPIDs
+ * SDP channels are mapped to (256 - 511) BPIDs
+ *
+ * LMAC channels and bpids are mapped as follows:
+ * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
+ * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
+ * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
+ */
+ switch (type) {
+ case NIX_INTF_TYPE_CGX:
+ if ((req->chan_base + req->chan_cnt) > 15)
+ return -EINVAL;
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+ /* Assign bpid based on cgx, lmac and chan id */
+ bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
+ (lmac_id * lmac_chan_cnt) + req->chan_base;
+
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+ if (bpid > cgx_bpid_cnt)
+ return -EINVAL;
+ break;
+
+ case NIX_INTF_TYPE_LBK:
+ if ((req->chan_base + req->chan_cnt) > 63)
+ return -EINVAL;
+ bpid = cgx_bpid_cnt + req->chan_base;
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+ if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return bpid;
+}
+
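Plugging numbers into the CGX branch above, under the channel layout the comment describes (the per-LMAC channel count and LMACs-per-CGX values are assumptions of this example):

/* Worked example (assuming lmac_chan_cnt = 16, hw->lmac_per_cgx = 4):
 *   cgx_id = 1, lmac_id = 0, chan_base = 0, bpid_per_chan = 1, chan_id = 3
 *   bpid = (1 * 4 * 16) + (0 * 16) + 0 + 3 = 67
 * which lands inside the cgx(1)_lmac(0) range of bpid(64 - 79).
 */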
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ int blkaddr, pf, type, chan_id = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u16 chan_base, chan;
+ s16 bpid, bpid_base;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+
+ /* Enable backpressure only for CGX mapped PFs and LBK interface */
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+ bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
+ chan_base = pfvf->rx_chan_base + req->chan_base;
+ bpid = bpid_base;
+
+ for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+ if (bpid < 0) {
+ dev_warn(rvu->dev, "Fail to enable backpressure\n");
+ return -EINVAL;
+ }
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ cfg | (bpid & 0xFF) | BIT_ULL(16));
+ chan_id++;
+ bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
+ }
+
+ for (chan = 0; chan < req->chan_cnt; chan++) {
+ /* Map channel and bpid assign to it */
+ rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
+ (bpid_base & 0x3FF);
+ if (req->bpid_per_chan)
+ bpid_base++;
+ }
+ rsp->chan_cnt = req->chan_cnt;
+
+ return 0;
+}
+
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
@@ -565,6 +708,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
*/
inst.res_addr = (u64)aq->res->iova;
+ /* The hardware writes the result of the previous instruction into the
+ * same aq->res->base, so take the lock here and wait until that write
+ * has completed.
+ */
+ spin_lock(&aq->lock);
+
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
@@ -609,11 +757,10 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
+ spin_unlock(&aq->lock);
return rc;
}
- spin_lock(&aq->lock);
-
/* Submit the instruction to AQ */
rc = nix_aq_enqueue_wait(rvu, block, &inst);
if (rc) {
@@ -718,6 +865,8 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
if (req->ctype == NIX_AQ_CTYPE_CQ) {
aq_req.cq.ena = 0;
aq_req.cq_mask.ena = 1;
+ aq_req.cq.bp_ena = 0;
+ aq_req.cq_mask.bp_ena = 1;
q_cnt = pfvf->cq_ctx->qsize;
bmap = pfvf->cq_bmap;
}
@@ -3061,6 +3210,9 @@ int rvu_nix_init(struct rvu *rvu)
/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
nix_link_config(rvu, blkaddr);
+
+ /* Enable Channel backpressure */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
}
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 6e7c7f459f74..67471cb2b129 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -94,6 +94,11 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
*/
inst.res_addr = (u64)aq->res->iova;
+ /* The hardware writes the result of the previous instruction into the
+ * same aq->res->base, so take the lock here and wait until that write
+ * has completed.
+ */
+ spin_lock(&aq->lock);
+
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
@@ -138,10 +143,10 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
break;
}
- if (rc)
+ if (rc) {
+ spin_unlock(&aq->lock);
return rc;
-
- spin_lock(&aq->lock);
+ }
/* Submit the instruction to AQ */
rc = npa_aq_enqueue_wait(rvu, block, &inst);
@@ -218,6 +223,8 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
aq_req.aura.ena = 0;
aq_req.aura_mask.ena = 1;
+ aq_req.aura.bp_ena = 0;
+ aq_req.aura_mask.bp_ena = 1;
cnt = pfvf->aura_ctx->qsize;
bmap = pfvf->aura_bmap;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 40e431debbe9..0a214084406a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -825,8 +825,10 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
goto load_default;
- if (cgx_get_mkex_prfl_info(&prfl_addr, &prfl_sz))
+ if (!rvu->fwdata)
goto load_default;
+ prfl_addr = rvu->fwdata->mcam_addr;
+ prfl_sz = rvu->fwdata->mcam_sz;
if (!prfl_addr || !prfl_sz)
goto load_default;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 9d8942acc232..a3ecb5de9000 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -11,6 +11,9 @@
#ifndef RVU_STRUCT_H
#define RVU_STRUCT_H
+/* RVU Block revision IDs */
+#define RVU_BLK_RVUM_REVID 0x01
+
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
BLKADDR_RVUM = 0x0ULL,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index b945bd3d5d88..157735443473 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -220,6 +220,28 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return err;
}
+int otx2_config_pause_frm(struct otx2_nic *pfvf)
+{
+ struct cgx_pause_frm_cfg *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
+ req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
+ req->set = 1;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+unlock:
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
+
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
@@ -580,6 +602,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
* RED accepts pkts if free pointers > 102 & <= 205.
* Drops pkts if free pointers < 102.
*/
+#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
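
Worked values of the integer arithmetic behind these thresholds, for reference:

/* RQ_BP_LVL_AURA   = 255 - (85 * 256) / 100 = 255 - 217 = 38  (BP at 85%)
 * RQ_PASS_LVL_AURA = 255 - (95 * 256) / 100 = 255 - 243 = 12  (RED at 95%)
 * RQ_DROP_LVL_AURA = 255 - (99 * 256) / 100 = 255 - 253 = 2   (drop at 99%)
 */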
@@ -741,6 +764,13 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
if (qidx < pfvf->hw.rx_queues) {
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
+
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+ aq->cq.bpid = pfvf->bpid[0];
+
+ /* Set the backpressure level to the same value as the CQ pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
}
/* Fill AQ info */
@@ -996,6 +1026,14 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
aq->aura.fc_addr = pool->fc_addr->iova;
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+ /* Enable backpressure for RQ aura */
+ if (aura_id < pfvf->hw.rqpool_cnt) {
+ aq->aura.bp_ena = 0;
+ aq->aura.nix0_bpid = pfvf->bpid[0];
+ /* Set backpressure level for RQ's Aura */
+ aq->aura.bp = RQ_BP_LVL_AURA;
+ }
+
/* Fill AQ info */
aq->ctype = NPA_AQ_CTYPE_AURA;
aq->op = NPA_AQ_INSTOP_INIT;
@@ -1307,6 +1345,25 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
otx2_mbox_unlock(mbox);
}
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
@@ -1355,6 +1412,17 @@ void mbox_handler_msix_offset(struct otx2_nic *pfvf,
pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ int chan, chan_id;
+
+ for (chan = 0; chan < rsp->chan_cnt; chan++) {
+ chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
+ pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
+ }
+}
+
void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
struct otx2_qset *qset = &pfvf->qset;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 320f3b7bf57f..885c3dcd4ac7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -204,6 +204,8 @@ struct otx2_nic {
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
+#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
u64 flags;
struct otx2_qset qset;
@@ -216,6 +218,7 @@ struct otx2_nic {
struct workqueue_struct *mbox_wq;
u16 pcifunc; /* RVU PF_FUNC */
+ u16 bpid[NIX_MAX_BPID_CHAN];
struct cgx_link_user_info linfo;
u64 reset_count;
@@ -558,6 +561,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
+int otx2_config_pause_frm(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
@@ -578,6 +582,7 @@ dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
gfp_t gfp);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
@@ -598,6 +603,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp);
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp);
/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 60fcf82dd8cb..f450111423a8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -253,6 +253,45 @@ fail:
return err;
}
+static void otx2_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_pause_frm_cfg *req, *rsp;
+
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req)
+ return;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pause_frm_cfg *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ pause->rx_pause = rsp->rx_pause;
+ pause->tx_pause = rsp->tx_pause;
+ }
+}
+
+static int otx2_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ if (pause->rx_pause)
+ pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+
+ if (pause->tx_pause)
+ pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+
+ return otx2_config_pause_frm(pfvf);
+}
+
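Once these ops are wired into otx2_ethtool_ops below, userspace reaches them through the standard ETHTOOL_{G,S}PAUSEPARAM commands. A minimal userspace sketch of the set path (the interface name is hypothetical; note the driver rejects autoneg with -EOPNOTSUPP):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_pauseparam pp = { .cmd = ETHTOOL_SPAUSEPARAM };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical name */
	pp.autoneg = 0;		/* must be 0, the driver returns -EOPNOTSUPP */
	pp.rx_pause = 1;
	pp.tx_pause = 1;
	ifr.ifr_data = (void *)&pp;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	close(fd);
	return 0;
}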
static void otx2_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -654,6 +693,8 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_rxfh = otx2_set_rxfh,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
+ .get_pauseparam = otx2_get_pauseparam,
+ .set_pauseparam = otx2_set_pauseparam,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 85f9b9ba6bd5..22f9a326fd81 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -148,6 +148,9 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
mbox_handler_nix_txsch_alloc(pf,
(struct nix_txsch_alloc_rsp *)msg);
break;
+ case MBOX_MSG_NIX_BP_ENABLE:
+ mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
+ break;
case MBOX_MSG_CGX_STATS:
mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
break;
@@ -654,6 +657,9 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
+ /* Enable backpressure */
+ otx2_nix_config_bp(pf, true);
+
/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
err = otx2_rq_aura_pool_init(pf);
if (err) {
@@ -737,6 +743,12 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+ otx2_mbox_lock(mbox);
+ /* Disable backpressure */
+ if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ otx2_nix_config_bp(pf, false);
+ otx2_mbox_unlock(mbox);
+
/* Disable RQs */
otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
index a1313d57e283..6928abcec0a3 100644
--- a/drivers/net/ethernet/marvell/skge.h
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -15,12 +15,6 @@
#define PCI_VPD_ROM_SZ 7L<<14 /* VPD ROM size 0=256, 1=512, ... */
#define PCI_REV_DESC 1<<2 /* Reverse Descriptor bytes */
-#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
- PCI_STATUS_SIG_SYSTEM_ERROR | \
- PCI_STATUS_REC_MASTER_ABORT | \
- PCI_STATUS_REC_TARGET_ABORT | \
- PCI_STATUS_PARITY)
-
enum csr_regs {
B0_RAP = 0x0000,
B0_CTST = 0x0004,
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index ada1ca60f088..b2dddd8a246c 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -252,12 +252,6 @@ enum {
};
-#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
- PCI_STATUS_SIG_SYSTEM_ERROR | \
- PCI_STATUS_REC_MASTER_ABORT | \
- PCI_STATUS_REC_TARGET_ABORT | \
- PCI_STATUS_PARITY)
-
enum csr_regs {
B0_RAP = 0x0000,
B0_CTST = 0x0004,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 8bf1f08fdee2..8a5ea2543670 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2121,6 +2121,10 @@ static int mlx4_en_set_phys_id(struct net_device *dev,
}
const struct ethtool_ops mlx4_en_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_PKT_RATE_RX_USECS,
.get_drvinfo = mlx4_en_get_drvinfo,
.get_link_ksettings = mlx4_en_get_link_ksettings,
.set_link_ksettings = mlx4_en_set_link_ksettings,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index a1f20b205299..312e0a1ad43d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -78,6 +78,16 @@ config MLX5_ESWITCH
Legacy SRIOV mode (L2 mac vlan steering based).
Switchdev mode (eswitch offloads).
+config MLX5_TC_CT
+ bool "MLX5 TC connection tracking offload support"
+ depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+ default y
+ help
+ Say Y here if you want to support offloading connection tracking rules
+ via tc ct action.
+
+ If unsure, say Y.
+
config MLX5_CORE_EN_DCB
bool "Data Center Bridging (DCB) Support"
default y
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index e0bb8e12356e..7408ae380d23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -25,7 +25,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
- en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
+ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o
#
# Netdev extra
@@ -34,9 +34,10 @@ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
- lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
+ lib/geneve.o en/mapping.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
en/tc_tun_geneve.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
+mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
#
# Core extra
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index ac108f1e5bd6..b7bb81b8c49b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -190,11 +190,6 @@ static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
return 0;
}
-enum mlx5_devlink_param_id {
- MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
-};
-
static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -210,14 +205,38 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
return 0;
}
+#ifdef CONFIG_MLX5_ESWITCH
+static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ int group_num = val.vu32;
+
+ if (group_num < 1 || group_num > 1024) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported group number, supported range is 1-1024");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+#endif
+
static const struct devlink_param mlx5_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
mlx5_devlink_fs_mode_validate),
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_enable_roce_validate),
+#ifdef CONFIG_MLX5_ESWITCH
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ "fdb_large_groups", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL,
+ mlx5_devlink_large_group_num_validate),
+#endif
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -230,13 +249,20 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
else
strcpy(value.vstr, "smfs");
devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+ MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
value);
value.vbool = MLX5_CAP_GEN(dev, roce);
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
value);
+
+#ifdef CONFIG_MLX5_ESWITCH
+ value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
+ devlink_param_driverinit_value_set(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ value);
+#endif
}
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index d0ba03774ddf..f0de327a59be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -6,6 +6,12 @@
#include <net/devlink.h>
+enum mlx5_devlink_param_id {
+ MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+};
+
struct devlink *mlx5_devlink_alloc(void);
void mlx5_devlink_free(struct devlink *devlink);
int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3cc439ab3253..6c4b45c2a8d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -204,7 +204,7 @@ struct mlx5e_tx_wqe {
struct mlx5e_rx_wqe_ll {
struct mlx5_wqe_srq_next_seg next;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
struct mlx5e_rx_wqe_cyc {
@@ -880,6 +880,7 @@ struct mlx5e_priv {
#endif
struct devlink_health_reporter *tx_reporter;
struct devlink_health_reporter *rx_reporter;
+ struct devlink_port dl_port;
struct mlx5e_xsk xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent stats_agent;
@@ -1168,6 +1169,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings);
+int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
+ const u8 hfunc);
+int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs);
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
new file mode 100644
index 000000000000..e38495e4aa42
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "en/devlink.h"
+
+int mlx5e_devlink_port_register(struct net_device *netdev)
+{
+ struct mlx5_core_dev *dev;
+ struct mlx5e_priv *priv;
+ struct devlink *devlink;
+ int err;
+
+ priv = netdev_priv(netdev);
+ dev = priv->mdev;
+
+ if (mlx5_core_is_pf(dev))
+ devlink_port_attrs_set(&priv->dl_port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ PCI_FUNC(dev->pdev->devfn),
+ false, 0,
+ NULL, 0);
+ else
+ devlink_port_attrs_set(&priv->dl_port,
+ DEVLINK_PORT_FLAVOUR_VIRTUAL,
+ 0, false, 0, NULL, 0);
+
+ devlink = priv_to_devlink(dev);
+ err = devlink_port_register(devlink, &priv->dl_port, 1);
+ if (err)
+ return err;
+ devlink_port_type_eth_set(&priv->dl_port, netdev);
+ return 0;
+}
+
+void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
+{
+ devlink_port_unregister(&priv->dl_port);
+}
+
+struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return &priv->dl_port;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
new file mode 100644
index 000000000000..3e5393a0901f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5E_EN_DEVLINK_H
+#define __MLX5E_EN_DEVLINK_H
+
+#include <net/devlink.h>
+#include "en.h"
+
+int mlx5e_devlink_port_register(struct net_device *dev);
+void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv);
+struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
new file mode 100644
index 000000000000..ea321e528749
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies */
+
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <linux/hashtable.h>
+
+#include "mapping.h"
+
+#define MAPPING_GRACE_PERIOD 2000
+
+struct mapping_ctx {
+ struct xarray xarray;
+ DECLARE_HASHTABLE(ht, 8);
+ struct mutex lock; /* Guards hashtable and xarray */
+ unsigned long max_id;
+ size_t data_size;
+ bool delayed_removal;
+ struct delayed_work dwork;
+ struct list_head pending_list;
+ spinlock_t pending_list_lock; /* Guards pending list */
+};
+
+struct mapping_item {
+ struct rcu_head rcu;
+ struct list_head list;
+ unsigned long timeout;
+ struct hlist_node node;
+ int cnt;
+ u32 id;
+ char data[];
+};
+
+int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id)
+{
+ struct mapping_item *mi;
+ int err = -ENOMEM;
+ u32 hash_key;
+
+ mutex_lock(&ctx->lock);
+
+ hash_key = jhash(data, ctx->data_size, 0);
+ hash_for_each_possible(ctx->ht, mi, node, hash_key) {
+ if (!memcmp(data, mi->data, ctx->data_size))
+ goto attach;
+ }
+
+ mi = kzalloc(sizeof(*mi) + ctx->data_size, GFP_KERNEL);
+ if (!mi)
+ goto err_alloc;
+
+ memcpy(mi->data, data, ctx->data_size);
+ hash_add(ctx->ht, &mi->node, hash_key);
+
+ err = xa_alloc(&ctx->xarray, &mi->id, mi, XA_LIMIT(1, ctx->max_id),
+ GFP_KERNEL);
+ if (err)
+ goto err_assign;
+attach:
+ ++mi->cnt;
+ *id = mi->id;
+
+ mutex_unlock(&ctx->lock);
+
+ return 0;
+
+err_assign:
+ hash_del(&mi->node);
+ kfree(mi);
+err_alloc:
+ mutex_unlock(&ctx->lock);
+
+ return err;
+}
+
+static void mapping_remove_and_free(struct mapping_ctx *ctx,
+ struct mapping_item *mi)
+{
+ xa_erase(&ctx->xarray, mi->id);
+ kfree_rcu(mi, rcu);
+}
+
+static void mapping_free_item(struct mapping_ctx *ctx,
+ struct mapping_item *mi)
+{
+ if (!ctx->delayed_removal) {
+ mapping_remove_and_free(ctx, mi);
+ return;
+ }
+
+ mi->timeout = jiffies + msecs_to_jiffies(MAPPING_GRACE_PERIOD);
+
+ spin_lock(&ctx->pending_list_lock);
+ list_add_tail(&mi->list, &ctx->pending_list);
+ spin_unlock(&ctx->pending_list_lock);
+
+ schedule_delayed_work(&ctx->dwork, MAPPING_GRACE_PERIOD);
+}
+
+int mapping_remove(struct mapping_ctx *ctx, u32 id)
+{
+ unsigned long index = id;
+ struct mapping_item *mi;
+ int err = -ENOENT;
+
+ mutex_lock(&ctx->lock);
+ mi = xa_load(&ctx->xarray, index);
+ if (!mi)
+ goto out;
+ err = 0;
+
+ if (--mi->cnt > 0)
+ goto out;
+
+ hash_del(&mi->node);
+ mapping_free_item(ctx, mi);
+out:
+ mutex_unlock(&ctx->lock);
+
+ return err;
+}
+
+int mapping_find(struct mapping_ctx *ctx, u32 id, void *data)
+{
+ unsigned long index = id;
+ struct mapping_item *mi;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ mi = xa_load(&ctx->xarray, index);
+ if (!mi)
+ goto err_find;
+
+ memcpy(data, mi->data, ctx->data_size);
+ err = 0;
+
+err_find:
+ rcu_read_unlock();
+ return err;
+}
+
+static void
+mapping_remove_and_free_list(struct mapping_ctx *ctx, struct list_head *list)
+{
+ struct mapping_item *mi;
+
+ list_for_each_entry(mi, list, list)
+ mapping_remove_and_free(ctx, mi);
+}
+
+static void mapping_work_handler(struct work_struct *work)
+{
+ unsigned long min_timeout = 0, now = jiffies;
+ struct mapping_item *mi, *next;
+ LIST_HEAD(pending_items);
+ struct mapping_ctx *ctx;
+
+ ctx = container_of(work, struct mapping_ctx, dwork.work);
+
+ spin_lock(&ctx->pending_list_lock);
+ list_for_each_entry_safe(mi, next, &ctx->pending_list, list) {
+ if (time_after(now, mi->timeout))
+ list_move(&mi->list, &pending_items);
+ else if (!min_timeout ||
+ time_before(mi->timeout, min_timeout))
+ min_timeout = mi->timeout;
+ }
+ spin_unlock(&ctx->pending_list_lock);
+
+ mapping_remove_and_free_list(ctx, &pending_items);
+
+ if (min_timeout)
+ schedule_delayed_work(&ctx->dwork, abs(min_timeout - now));
+}
+
+static void mapping_flush_work(struct mapping_ctx *ctx)
+{
+ if (!ctx->delayed_removal)
+ return;
+
+ cancel_delayed_work_sync(&ctx->dwork);
+ mapping_remove_and_free_list(ctx, &ctx->pending_list);
+}
+
+struct mapping_ctx *
+mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
+{
+ struct mapping_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->max_id = max_id ? max_id : UINT_MAX;
+ ctx->data_size = data_size;
+
+ if (delayed_removal) {
+ INIT_DELAYED_WORK(&ctx->dwork, mapping_work_handler);
+ INIT_LIST_HEAD(&ctx->pending_list);
+ spin_lock_init(&ctx->pending_list_lock);
+ ctx->delayed_removal = true;
+ }
+
+ mutex_init(&ctx->lock);
+ xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1);
+
+ return ctx;
+}
+
+void mapping_destroy(struct mapping_ctx *ctx)
+{
+ mapping_flush_work(ctx);
+ xa_destroy(&ctx->xarray);
+ mutex_destroy(&ctx->lock);
+
+ kfree(ctx);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
new file mode 100644
index 000000000000..285525cc5470
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#ifndef __MLX5_MAPPING_H__
+#define __MLX5_MAPPING_H__
+
+struct mapping_ctx;
+
+int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id);
+int mapping_remove(struct mapping_ctx *ctx, u32 id);
+int mapping_find(struct mapping_ctx *ctx, u32 id, void *data);
+
+/* mapping uses an xarray to map data to ids in add(), and for find().
+ * For locking, it uses an internal xarray spin lock for add()/remove();
+ * find() uses rcu_read_lock().
+ * Choosing delayed_removal postpones the removal of a previously mapped
+ * id by MAPPING_GRACE_PERIOD milliseconds.
+ * This avoids races against hardware: a packet may be marked in hardware
+ * with a previous id while a quick remove() followed by add() reuses that
+ * same id, in which case find() would return the new mapping instead of
+ * the old one that was used to mark the packet.
+ */
+struct mapping_ctx *mapping_create(size_t data_size, u32 max_id,
+ bool delayed_removal);
+void mapping_destroy(struct mapping_ctx *ctx);
+
+#endif /* __MLX5_MAPPING_H__ */
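A usage sketch of this API with a hypothetical caller and key type (the real users elsewhere in this series appear to be the eswitch chain and tc_ct code):

#include <linux/err.h>
#include "mapping.h"

struct demo_key {		/* hypothetical fixed-size key */
	u32 chain;
	u32 zone;
};

static int demo_mapping_usage(void)
{
	struct demo_key key = { .chain = 5, .zone = 1 }, out;
	struct mapping_ctx *ctx;
	u32 id;
	int err;

	/* ids come from [1, 0xFFF]; delayed (grace-period) removal enabled */
	ctx = mapping_create(sizeof(key), 0xFFF, true);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = mapping_add(ctx, &key, &id);	/* same data -> same id, refcounted */
	if (!err) {
		mapping_find(ctx, id, &out);	/* copies the data back out */
		mapping_remove(ctx, id);	/* freed after the grace period */
	}

	mapping_destroy(ctx);
	return err;
}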
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
new file mode 100644
index 000000000000..956d9ddcdeed
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -0,0 +1,1356 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <uapi/linux/tc_act/tc_pedit.h>
+#include <net/tc_act/tc_ct.h>
+#include <net/flow_offload.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <linux/workqueue.h>
+
+#include "en/tc_ct.h"
+#include "en.h"
+#include "en_tc.h"
+#include "en_rep.h"
+#include "eswitch_offloads_chains.h"
+
+#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen * 8)
+#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
+#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
+#define MLX5_CT_STATE_TRK_BIT BIT(2)
+
+#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
+#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
+#define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX
+
+#define ct_dbg(fmt, args...)\
+ netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
+
+struct mlx5_tc_ct_priv {
+ struct mlx5_eswitch *esw;
+ const struct net_device *netdev;
+ struct idr fte_ids;
+ struct idr tuple_ids;
+ struct rhashtable zone_ht;
+ struct mlx5_flow_table *ct;
+ struct mlx5_flow_table *ct_nat;
+ struct mlx5_flow_table *post_ct;
+ struct mutex control_lock; /* guards parallel adds/dels */
+};
+
+struct mlx5_ct_flow {
+ struct mlx5_esw_flow_attr pre_ct_attr;
+ struct mlx5_esw_flow_attr post_ct_attr;
+ struct mlx5_flow_handle *pre_ct_rule;
+ struct mlx5_flow_handle *post_ct_rule;
+ struct mlx5_ct_ft *ft;
+ u32 fte_id;
+ u32 chain_mapping;
+};
+
+struct mlx5_ct_zone_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_esw_flow_attr attr;
+ int tupleid;
+ bool nat;
+};
+
+struct mlx5_ct_ft {
+ struct rhash_head node;
+ u16 zone;
+ refcount_t refcount;
+ struct nf_flowtable *nf_ft;
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct rhashtable ct_entries_ht;
+ struct list_head ct_entries_list;
+};
+
+struct mlx5_ct_entry {
+ struct list_head list;
+ u16 zone;
+ struct rhash_head node;
+ struct flow_rule *flow_rule;
+ struct mlx5_fc *counter;
+ unsigned long lastuse;
+ unsigned long cookie;
+ unsigned long restore_cookie;
+ struct mlx5_ct_zone_rule zone_rules[2];
+};
+
+static const struct rhashtable_params cts_ht_params = {
+ .head_offset = offsetof(struct mlx5_ct_entry, node),
+ .key_offset = offsetof(struct mlx5_ct_entry, cookie),
+ .key_len = sizeof(((struct mlx5_ct_entry *)0)->cookie),
+ .automatic_shrinking = true,
+ .min_size = 16 * 1024,
+};
+
+static const struct rhashtable_params zone_params = {
+ .head_offset = offsetof(struct mlx5_ct_ft, node),
+ .key_offset = offsetof(struct mlx5_ct_ft, zone),
+ .key_len = sizeof(((struct mlx5_ct_ft *)0)->zone),
+ .automatic_shrinking = true,
+};
+
+static struct mlx5_tc_ct_priv *
+mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ return uplink_priv->ct_priv;
+}
+
+static int
+mlx5_tc_ct_set_tuple_match(struct mlx5_flow_spec *spec,
+ struct flow_rule *rule)
+{
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+ u16 addr_type = 0;
+ u8 ip_proto = 0;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+ ntohs(match.mask->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ntohs(match.key->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ match.mask->ip_proto);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ match.key->ip_proto);
+
+ ip_proto = match.key->ip_proto;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &match.mask->src, sizeof(match.mask->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &match.key->src, sizeof(match.key->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &match.mask->dst, sizeof(match.mask->dst));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &match.key->dst, sizeof(match.key->dst));
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.mask->src, sizeof(match.mask->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.key->src, sizeof(match.key->src));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.mask->dst, sizeof(match.mask->dst));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.key->dst, sizeof(match.key->dst));
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ tcp_sport, ntohs(match.mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ tcp_sport, ntohs(match.key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ tcp_dport, ntohs(match.mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ tcp_dport, ntohs(match.key->dst));
+ break;
+
+ case IPPROTO_UDP:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_sport, ntohs(match.mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_sport, ntohs(match.key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_dport, ntohs(match.mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_dport, ntohs(match.key->dst));
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match;
+
+ flow_rule_match_tcp(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
+ ntohs(match.mask->flags));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
+ ntohs(match.key->flags));
+ }
+
+ return 0;
+}
+
+static void
+mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_entry *entry,
+ bool nat)
+{
+ struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
+ struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
+ struct mlx5_eswitch *esw = ct_priv->esw;
+
+ ct_dbg("Deleting ct entry rule in zone %d", entry->zone);
+
+ mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
+ mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
+ idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
+}
+
+static void
+mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_entry *entry)
+{
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+
+ mlx5_fc_destroy(ct_priv->esw->dev, entry->counter);
+}
+
+static struct flow_action_entry *
+mlx5_tc_ct_get_ct_metadata_action(struct flow_rule *flow_rule)
+{
+ struct flow_action *flow_action = &flow_rule->action;
+ struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id == FLOW_ACTION_CT_METADATA)
+ return act;
+ }
+
+ return NULL;
+}
+
+static int
+mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts,
+ u8 ct_state,
+ u32 mark,
+ u32 label,
+ u32 tupleid)
+{
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ int err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ CTSTATE_TO_REG, ct_state);
+ if (err)
+ return err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ MARK_TO_REG, mark);
+ if (err)
+ return err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ LABELS_TO_REG, label);
+ if (err)
+ return err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ TUPLEID_TO_REG, tupleid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act,
+ char *modact)
+{
+ u32 offset = act->mangle.offset, field;
+
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ MLX5_SET(set_action_in, modact, length, 0);
+ if (offset == offsetof(struct iphdr, saddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV4;
+ else if (offset == offsetof(struct iphdr, daddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV4;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ MLX5_SET(set_action_in, modact, length, 0);
+ if (offset == offsetof(struct ipv6hdr, saddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0;
+ else if (offset == offsetof(struct ipv6hdr, saddr) + 4)
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32;
+ else if (offset == offsetof(struct ipv6hdr, saddr) + 8)
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64;
+ else if (offset == offsetof(struct ipv6hdr, saddr) + 12)
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96;
+ else if (offset == offsetof(struct ipv6hdr, daddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0;
+ else if (offset == offsetof(struct ipv6hdr, daddr) + 4)
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32;
+ else if (offset == offsetof(struct ipv6hdr, daddr) + 8)
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64;
+ else if (offset == offsetof(struct ipv6hdr, daddr) + 12)
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ MLX5_SET(set_action_in, modact, length, 16);
+ if (offset == offsetof(struct tcphdr, source))
+ field = MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT;
+ else if (offset == offsetof(struct tcphdr, dest))
+ field = MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ MLX5_SET(set_action_in, modact, length, 16);
+ if (offset == offsetof(struct udphdr, source))
+ field = MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT;
+ else if (offset == offsetof(struct udphdr, dest))
+ field = MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, offset, 0);
+ MLX5_SET(set_action_in, modact, field, field);
+ MLX5_SET(set_action_in, modact, data, act->mangle.val);
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
+{
+ struct flow_action *flow_action = &flow_rule->action;
+ struct mlx5_core_dev *mdev = ct_priv->esw->dev;
+ struct flow_action_entry *act;
+ size_t action_size;
+ char *modact;
+ int err, i;
+
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE: {
+ err = alloc_mod_hdr_actions(mdev,
+ MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts);
+ if (err)
+ return err;
+
+ modact = mod_acts->actions +
+ mod_acts->num_actions * action_size;
+
+ err = mlx5_tc_ct_parse_mangle_to_mod_act(act, modact);
+ if (err)
+ return err;
+
+ mod_acts->num_actions++;
+ }
+ break;
+
+ case FLOW_ACTION_CT_METADATA:
+ /* Handled earlier */
+ continue;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_esw_flow_attr *attr,
+ struct flow_rule *flow_rule,
+ u32 tupleid,
+ bool nat)
+{
+ struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_modify_hdr *mod_hdr;
+ struct flow_action_entry *meta;
+ int err;
+
+ meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
+ if (!meta)
+ return -EOPNOTSUPP;
+
+ if (meta->ct_metadata.labels[1] ||
+ meta->ct_metadata.labels[2] ||
+ meta->ct_metadata.labels[3]) {
+ ct_dbg("Failed to offload ct entry due to unsupported label");
+ return -EOPNOTSUPP;
+ }
+
+ if (nat) {
+ err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule,
+ &mod_acts);
+ if (err)
+ goto err_mapping;
+ }
+
+ err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
+ (MLX5_CT_STATE_ESTABLISHED_BIT |
+ MLX5_CT_STATE_TRK_BIT),
+ meta->ct_metadata.mark,
+ meta->ct_metadata.labels[0],
+ tupleid);
+ if (err)
+ goto err_mapping;
+
+ mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts.num_actions,
+ mod_acts.actions);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ goto err_mapping;
+ }
+ attr->modify_hdr = mod_hdr;
+
+ dealloc_mod_hdr_actions(&mod_acts);
+ return 0;
+
+err_mapping:
+ dealloc_mod_hdr_actions(&mod_acts);
+ return err;
+}
+
+static int
+mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ bool nat)
+{
+ struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
+ struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_flow_spec spec = {};
+ u32 tupleid = 1;
+ int err;
+
+ zone_rule->nat = nat;
+
+ /* Get tuple unique id */
+ err = idr_alloc_u32(&ct_priv->tuple_ids, zone_rule, &tupleid,
+ TUPLE_ID_MAX, GFP_KERNEL);
+ if (err) {
+ netdev_warn(ct_priv->netdev,
+ "Failed to allocate tuple id, err: %d\n", err);
+ return err;
+ }
+ zone_rule->tupleid = tupleid;
+
+ err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
+ tupleid, nat);
+ if (err) {
+ ct_dbg("Failed to create ct entry mod hdr");
+ goto err_mod_hdr;
+ }
+
+ attr->action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->dest_chain = 0;
+ attr->dest_ft = ct_priv->post_ct;
+ attr->fdb = nat ? ct_priv->ct_nat : ct_priv->ct;
+ attr->outer_match_level = MLX5_MATCH_L4;
+ attr->counter = entry->counter;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+
+ mlx5_tc_ct_set_tuple_match(&spec, flow_rule);
+ mlx5e_tc_match_to_reg_match(&spec, ZONE_TO_REG,
+ entry->zone & MLX5_CT_ZONE_MASK,
+ MLX5_CT_ZONE_MASK);
+
+ zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, &spec, attr);
+ if (IS_ERR(zone_rule->rule)) {
+ err = PTR_ERR(zone_rule->rule);
+ ct_dbg("Failed to add ct entry rule, nat: %d", nat);
+ goto err_rule;
+ }
+
+ ct_dbg("Offloaded ct entry rule in zone %d", entry->zone);
+
+ return 0;
+
+err_rule:
+ mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
+err_mod_hdr:
+ idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
+ return err;
+}
+
+static int
+mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry)
+{
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ int err;
+
+ entry->counter = mlx5_fc_create(esw->dev, true);
+ if (IS_ERR(entry->counter)) {
+ err = PTR_ERR(entry->counter);
+ ct_dbg("Failed to create counter for ct entry");
+ return err;
+ }
+
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false);
+ if (err)
+ goto err_orig;
+
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true);
+ if (err)
+ goto err_nat;
+
+ return 0;
+
+err_nat:
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+err_orig:
+ mlx5_fc_destroy(esw->dev, entry->counter);
+ return err;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *flow_rule = flow_cls_offload_flow_rule(flow);
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ struct flow_action_entry *meta_action;
+ unsigned long cookie = flow->cookie;
+ struct mlx5_ct_entry *entry;
+ int err;
+
+ meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
+ if (!meta_action)
+ return -EOPNOTSUPP;
+
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+ cts_ht_params);
+ if (entry)
+ return 0;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->zone = ft->zone;
+ entry->flow_rule = flow_rule;
+ entry->cookie = flow->cookie;
+ entry->restore_cookie = meta_action->ct_metadata.cookie;
+
+ err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry);
+ if (err)
+ goto err_rules;
+
+ err = rhashtable_insert_fast(&ft->ct_entries_ht, &entry->node,
+ cts_ht_params);
+ if (err)
+ goto err_insert;
+
+ list_add(&entry->list, &ft->ct_entries_list);
+
+ return 0;
+
+err_insert:
+ mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+err_rules:
+ kfree(entry);
+ netdev_warn(ct_priv->netdev,
+ "Failed to offload ct entry, err: %d\n", err);
+ return err;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ unsigned long cookie = flow->cookie;
+ struct mlx5_ct_entry *entry;
+
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+ cts_ht_params);
+ if (!entry)
+ return -ENOENT;
+
+ mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry);
+ WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
+ &entry->node,
+ cts_ht_params));
+ list_del(&entry->list);
+ kfree(entry);
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *f)
+{
+ unsigned long cookie = f->cookie;
+ struct mlx5_ct_entry *entry;
+ u64 lastuse, packets, bytes;
+
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+ cts_ht_params);
+ if (!entry)
+ return -ENOENT;
+
+ mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse);
+ flow_stats_update(&f->stats, bytes, packets, lastuse);
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *f = type_data;
+ struct mlx5_ct_ft *ft = cb_priv;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return mlx5_tc_ct_block_flow_offload_add(ft, f);
+ case FLOW_CLS_DESTROY:
+ return mlx5_tc_ct_block_flow_offload_del(ft, f);
+ case FLOW_CLS_STATS:
+ return mlx5_tc_ct_block_flow_offload_stats(ft, f);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int
+mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct flow_dissector_key_ct *mask, *key;
+ bool trk, est, untrk, unest, new, unnew;
+ u32 ctstate = 0, ctstate_mask = 0;
+ u16 ct_state_on, ct_state_off;
+ u16 ct_state, ct_state_mask;
+ struct flow_match_ct match;
+
+ if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT))
+ return 0;
+
+ if (!ct_priv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload of ct matching isn't available");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_ct(f->rule, &match);
+
+ key = match.key;
+ mask = match.mask;
+
+ ct_state = key->ct_state;
+ ct_state_mask = mask->ct_state;
+
+ if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+ TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
+ TCA_FLOWER_KEY_CT_FLAGS_NEW)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "only ct_state trk, est and new are supported for offload");
+ return -EOPNOTSUPP;
+ }
+
+ if (mask->ct_labels[1] || mask->ct_labels[2] || mask->ct_labels[3]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "only lower 32bits of ct_labels are supported for offload");
+ return -EOPNOTSUPP;
+ }
+
+ ct_state_on = ct_state & ct_state_mask;
+ ct_state_off = (ct_state & ct_state_mask) ^ ct_state_mask;
+ trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
+ new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
+ est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+ untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
+ unnew = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_NEW;
+ unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+
+ ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
+ ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+ ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
+ ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+
+ if (new) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "matching on ct_state +new isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (mask->ct_zone)
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
+ key->ct_zone, MLX5_CT_ZONE_MASK);
+ if (ctstate_mask)
+ mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG,
+ ctstate, ctstate_mask);
+ if (mask->ct_mark)
+ mlx5e_tc_match_to_reg_match(spec, MARK_TO_REG,
+ key->ct_mark, mask->ct_mark);
+ if (mask->ct_labels[0])
+ mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG,
+ key->ct_labels[0],
+ mask->ct_labels[0]);
+
+ return 0;
+}
+
+int
+mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+
+ if (!ct_priv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload of ct action isn't available");
+ return -EOPNOTSUPP;
+ }
+
+ attr->ct_attr.zone = act->ct.zone;
+ attr->ct_attr.ct_action = act->ct.action;
+ attr->ct_attr.nf_ft = act->ct.flow_table;
+
+ return 0;
+}
+
+static struct mlx5_ct_ft *
+mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
+ struct nf_flowtable *nf_ft)
+{
+ struct mlx5_ct_ft *ft;
+ int err;
+
+ ft = rhashtable_lookup_fast(&ct_priv->zone_ht, &zone, zone_params);
+ if (ft) {
+ refcount_inc(&ft->refcount);
+ return ft;
+ }
+
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ if (!ft)
+ return ERR_PTR(-ENOMEM);
+
+ ft->zone = zone;
+ ft->nf_ft = nf_ft;
+ ft->ct_priv = ct_priv;
+ INIT_LIST_HEAD(&ft->ct_entries_list);
+ refcount_set(&ft->refcount, 1);
+
+ err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params);
+ if (err)
+ goto err_init;
+
+ err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node,
+ zone_params);
+ if (err)
+ goto err_insert;
+
+ err = nf_flow_table_offload_add_cb(ft->nf_ft,
+ mlx5_tc_ct_block_flow_offload, ft);
+ if (err)
+ goto err_add_cb;
+
+ return ft;
+
+err_add_cb:
+ rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
+err_insert:
+ rhashtable_destroy(&ft->ct_entries_ht);
+err_init:
+ kfree(ft);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_tc_ct_flush_ft(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
+{
+ struct mlx5_ct_entry *entry;
+
+ list_for_each_entry(entry, &ft->ct_entries_list, list)
+ mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry);
+}
+
+static void
+mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
+{
+ if (!refcount_dec_and_test(&ft->refcount))
+ return;
+
+ nf_flow_table_offload_del_cb(ft->nf_ft,
+ mlx5_tc_ct_block_flow_offload, ft);
+ mlx5_tc_ct_flush_ft(ct_priv, ft);
+ rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
+ rhashtable_destroy(&ft->ct_entries_ht);
+ kfree(ft);
+}
+
+/* We translate the tc filter with CT action to the following HW model:
+ *
+ * +-------------------+ +--------------------+ +--------------+
+ * + pre_ct (tc chain) +----->+ CT (nat or no nat) +--->+ post_ct +----->
+ * + original match + | + tuple + zone match + | + fte_id match + |
+ * +-------------------+ | +--------------------+ | +--------------+ |
+ * v v v
+ * set chain miss mapping set mark original
+ * set fte_id set label filter
+ * set zone set established actions
+ * set tunnel_id do nat (if needed)
+ * do decap
+ */
+static int
+__mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *orig_spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_handle **flow_rule)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
+ struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_flow_spec post_ct_spec = {};
+ struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5_modify_hdr *mod_hdr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_ct_flow *ct_flow;
+ int chain_mapping = 0, err;
+ struct mlx5_ct_ft *ft;
+ u32 fte_id = 1;
+
+ ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
+ if (!ct_flow)
+ return -ENOMEM;
+
+ /* Register for CT established events */
+ ft = mlx5_tc_ct_add_ft_cb(ct_priv, attr->ct_attr.zone,
+ attr->ct_attr.nf_ft);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ ct_dbg("Failed to register to ft callback");
+ goto err_ft;
+ }
+ ct_flow->ft = ft;
+
+ err = idr_alloc_u32(&ct_priv->fte_ids, ct_flow, &fte_id,
+ MLX5_FTE_ID_MAX, GFP_KERNEL);
+ if (err) {
+ netdev_warn(priv->netdev,
+ "Failed to allocate fte id, err: %d\n", err);
+ goto err_idr;
+ }
+ ct_flow->fte_id = fte_id;
+
+ /* Base esw attributes of both rules on original rule attribute */
+ pre_ct_attr = &ct_flow->pre_ct_attr;
+ memcpy(pre_ct_attr, attr, sizeof(*attr));
+ memcpy(&ct_flow->post_ct_attr, attr, sizeof(*attr));
+
+ /* Modify the original rule's action to fwd and modify, leave decap */
+ pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ /* Write the chain miss tag for a miss in the ct table, as a miss
+ * there doesn't go through all prios of this chain the way a
+ * normal tc rule miss does.
+ */
+ err = mlx5_esw_chains_get_chain_mapping(esw, attr->chain,
+ &chain_mapping);
+ if (err) {
+ ct_dbg("Failed to get chain register mapping for chain");
+ goto err_get_chain;
+ }
+ ct_flow->chain_mapping = chain_mapping;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ CHAIN_TO_REG, chain_mapping);
+ if (err) {
+ ct_dbg("Failed to set chain register mapping");
+ goto err_mapping;
+ }
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts, ZONE_TO_REG,
+ attr->ct_attr.zone &
+ MLX5_CT_ZONE_MASK);
+ if (err) {
+ ct_dbg("Failed to set zone register mapping");
+ goto err_mapping;
+ }
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ FTEID_TO_REG, fte_id);
+ if (err) {
+ ct_dbg("Failed to set fte_id register mapping");
+ goto err_mapping;
+ }
+
+ /* If the original flow does decap, we do it before going into the
+ * ct table, so add a rewrite for the tunnel match_id.
+ */
+ if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ attr->chain == 0) {
+ u32 tun_id = mlx5e_tc_get_flow_tun_id(flow);
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ TUNNEL_TO_REG,
+ tun_id);
+ if (err) {
+ ct_dbg("Failed to set tunnel register mapping");
+ goto err_mapping;
+ }
+ }
+
+ mod_hdr = mlx5_modify_header_alloc(esw->dev,
+ MLX5_FLOW_NAMESPACE_FDB,
+ pre_mod_acts.num_actions,
+ pre_mod_acts.actions);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ ct_dbg("Failed to create pre ct mod hdr");
+ goto err_mapping;
+ }
+ pre_ct_attr->modify_hdr = mod_hdr;
+
+ /* The post ct rule matches on fte_id and executes the original
+ * tc rule's action
+ */
+ mlx5e_tc_match_to_reg_match(&post_ct_spec, FTEID_TO_REG,
+ fte_id, MLX5_FTE_ID_MASK);
+
+ /* Put post_ct rule on post_ct fdb */
+ ct_flow->post_ct_attr.chain = 0;
+ ct_flow->post_ct_attr.prio = 0;
+ ct_flow->post_ct_attr.fdb = ct_priv->post_ct;
+
+ ct_flow->post_ct_attr.inner_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr.outer_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr.action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
+ rule = mlx5_eswitch_add_offloaded_rule(esw, &post_ct_spec,
+ &ct_flow->post_ct_attr);
+ ct_flow->post_ct_rule = rule;
+ if (IS_ERR(ct_flow->post_ct_rule)) {
+ err = PTR_ERR(ct_flow->post_ct_rule);
+ ct_dbg("Failed to add post ct rule");
+ goto err_insert_post_ct;
+ }
+
+ /* Change the original rule to point to the ct table */
+ pre_ct_attr->dest_chain = 0;
+ pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
+ ct_flow->pre_ct_rule = mlx5_eswitch_add_offloaded_rule(esw,
+ orig_spec,
+ pre_ct_attr);
+ if (IS_ERR(ct_flow->pre_ct_rule)) {
+ err = PTR_ERR(ct_flow->pre_ct_rule);
+ ct_dbg("Failed to add pre ct rule");
+ goto err_insert_orig;
+ }
+
+ attr->ct_attr.ct_flow = ct_flow;
+ *flow_rule = ct_flow->post_ct_rule;
+ dealloc_mod_hdr_actions(&pre_mod_acts);
+
+ return 0;
+
+err_insert_orig:
+ mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule,
+ &ct_flow->post_ct_attr);
+err_insert_post_ct:
+ mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
+err_mapping:
+ dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+err_get_chain:
+ idr_remove(&ct_priv->fte_ids, fte_id);
+err_idr:
+ mlx5_tc_ct_del_ft_cb(ct_priv, ft);
+err_ft:
+ kfree(ct_flow);
+ netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
+ return err;
+}
+
+static int
+__mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *orig_spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts,
+ struct mlx5_flow_handle **flow_rule)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5_modify_hdr *mod_hdr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_ct_flow *ct_flow;
+ int err;
+
+ ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
+ if (!ct_flow)
+ return -ENOMEM;
+
+ /* Base esw attributes on original rule attribute */
+ pre_ct_attr = &ct_flow->pre_ct_attr;
+ memcpy(pre_ct_attr, attr, sizeof(*attr));
+
+ err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0);
+ if (err) {
+ ct_dbg("Failed to set register for ct clear");
+ goto err_set_registers;
+ }
+
+ mod_hdr = mlx5_modify_header_alloc(esw->dev,
+ MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts->num_actions,
+ mod_acts->actions);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ ct_dbg("Failed to add create ct clear mod hdr");
+ goto err_set_registers;
+ }
+
+ dealloc_mod_hdr_actions(mod_acts);
+ pre_ct_attr->modify_hdr = mod_hdr;
+ pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ rule = mlx5_eswitch_add_offloaded_rule(esw, orig_spec, pre_ct_attr);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ ct_dbg("Failed to add ct clear rule");
+ goto err_insert;
+ }
+
+ attr->ct_attr.ct_flow = ct_flow;
+ ct_flow->pre_ct_rule = rule;
+ *flow_rule = rule;
+
+ return 0;
+
+err_insert:
+ mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
+err_set_registers:
+ netdev_warn(priv->netdev,
+ "Failed to offload ct clear flow, err %d\n", err);
+ return err;
+}
+
+struct mlx5_flow_handle *
+mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct mlx5_flow_handle *rule;
+ int err;
+
+ if (!ct_priv)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&ct_priv->control_lock);
+ if (clear_action)
+ err = __mlx5_tc_ct_flow_offload_clear(priv, flow, spec, attr,
+ mod_hdr_acts, &rule);
+ else
+ err = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr,
+ &rule);
+ mutex_unlock(&ct_priv->control_lock);
+ if (err)
+ return ERR_PTR(err);
+
+ return rule;
+}
+
+static void
+__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_flow *ct_flow)
+{
+ struct mlx5_esw_flow_attr *pre_ct_attr = &ct_flow->pre_ct_attr;
+ struct mlx5_eswitch *esw = ct_priv->esw;
+
+ mlx5_eswitch_del_offloaded_rule(esw, ct_flow->pre_ct_rule,
+ pre_ct_attr);
+ mlx5_modify_header_dealloc(esw->dev, pre_ct_attr->modify_hdr);
+
+ if (ct_flow->post_ct_rule) {
+ mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule,
+ &ct_flow->post_ct_attr);
+ mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+ idr_remove(&ct_priv->fte_ids, ct_flow->fte_id);
+ mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
+ }
+
+ kfree(ct_flow);
+}
+
+void
+mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
+
+ /* We may be called on error to clean up resources from parsing,
+ * but there is nothing to clean up for now
+ */
+ if (!ct_flow)
+ return;
+
+ mutex_lock(&ct_priv->control_lock);
+ __mlx5_tc_ct_delete_flow(ct_priv, ct_flow);
+ mutex_unlock(&ct_priv->control_lock);
+}
+
+static int
+mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw,
+ const char **err_msg)
+{
+#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ /* cannot restore chain ID on HW miss */
+
+ *err_msg = "tc skb extension missing";
+ return -EOPNOTSUPP;
+#endif
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) {
+ *err_msg = "firmware level support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) {
+ /* The vlan workaround must be avoided for multi-chain rules.
+ * This is just a sanity check, as the pop vlan action should
+ * be supported by any FW that supports ignore_flow_level
+ */
+
+ *err_msg = "firmware vlan actions support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev,
+ fdb_modify_header_fwd_to_table)) {
+ /* CT always writes to registers, which are mod header actions.
+ * Therefore, mod header and goto are both required
+ */
+
+ *err_msg = "firmware fwd and modify support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
+ *err_msg = "register loopback isn't supported";
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void
+mlx5_tc_ct_init_err(struct mlx5e_rep_priv *rpriv, const char *msg, int err)
+{
+ if (msg)
+ netdev_warn(rpriv->netdev,
+ "tc ct offload not supported, %s, err: %d\n",
+ msg, err);
+ else
+ netdev_warn(rpriv->netdev,
+ "tc ct offload not supported, err: %d\n",
+ err);
+}
+
+int
+mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ const char *msg;
+ int err;
+
+ rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+ priv = netdev_priv(rpriv->netdev);
+ esw = priv->mdev->priv.eswitch;
+
+ err = mlx5_tc_ct_init_check_support(esw, &msg);
+ if (err) {
+ mlx5_tc_ct_init_err(rpriv, msg, err);
+ goto err_support;
+ }
+
+ ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL);
+ if (!ct_priv) {
+ mlx5_tc_ct_init_err(rpriv, NULL, -ENOMEM);
+ goto err_alloc;
+ }
+
+ ct_priv->esw = esw;
+ ct_priv->netdev = rpriv->netdev;
+ ct_priv->ct = mlx5_esw_chains_create_global_table(esw);
+ if (IS_ERR(ct_priv->ct)) {
+ err = PTR_ERR(ct_priv->ct);
+ mlx5_tc_ct_init_err(rpriv, "failed to create ct table", err);
+ goto err_ct_tbl;
+ }
+
+ ct_priv->ct_nat = mlx5_esw_chains_create_global_table(esw);
+ if (IS_ERR(ct_priv->ct_nat)) {
+ err = PTR_ERR(ct_priv->ct_nat);
+ mlx5_tc_ct_init_err(rpriv, "failed to create ct nat table",
+ err);
+ goto err_ct_nat_tbl;
+ }
+
+ ct_priv->post_ct = mlx5_esw_chains_create_global_table(esw);
+ if (IS_ERR(ct_priv->post_ct)) {
+ err = PTR_ERR(ct_priv->post_ct);
+ mlx5_tc_ct_init_err(rpriv, "failed to create post ct table",
+ err);
+ goto err_post_ct_tbl;
+ }
+
+ idr_init(&ct_priv->fte_ids);
+ idr_init(&ct_priv->tuple_ids);
+ mutex_init(&ct_priv->control_lock);
+ rhashtable_init(&ct_priv->zone_ht, &zone_params);
+
+ /* Done, set ct_priv to mark it initialized */
+ uplink_priv->ct_priv = ct_priv;
+
+ return 0;
+
+err_post_ct_tbl:
+ mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct_nat);
+err_ct_nat_tbl:
+ mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct);
+err_ct_tbl:
+ kfree(ct_priv);
+err_alloc:
+err_support:
+
+ return 0;
+}
+
+void
+mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+ struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+
+ if (!ct_priv)
+ return;
+
+ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct);
+ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat);
+ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
+
+ rhashtable_destroy(&ct_priv->zone_ht);
+ mutex_destroy(&ct_priv->control_lock);
+ idr_destroy(&ct_priv->tuple_ids);
+ idr_destroy(&ct_priv->fte_ids);
+ kfree(ct_priv);
+
+ uplink_priv->ct_priv = NULL;
+}
+
+bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid)
+{
+ struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+ struct mlx5_ct_zone_rule *zone_rule;
+ struct mlx5_ct_entry *entry;
+
+ if (!ct_priv || !tupleid)
+ return true;
+
+ zone_rule = idr_find(&ct_priv->tuple_ids, tupleid);
+ if (!zone_rule)
+ return false;
+
+ entry = container_of(zone_rule, struct mlx5_ct_entry,
+ zone_rules[zone_rule->nat]);
+ tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
new file mode 100644
index 000000000000..6b2c893372da
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_CT_H__
+#define __MLX5_EN_TC_CT_H__
+
+#include <net/pkt_cls.h>
+#include <linux/mlx5/fs.h>
+#include <net/tc_act/tc_ct.h>
+
+struct mlx5_esw_flow_attr;
+struct mlx5e_tc_mod_hdr_acts;
+struct mlx5_rep_uplink_priv;
+struct mlx5e_tc_flow;
+struct mlx5e_priv;
+
+struct mlx5_ct_flow;
+
+struct nf_flowtable;
+
+struct mlx5_ct_attr {
+ u16 zone;
+ u16 ct_action;
+ struct mlx5_ct_flow *ct_flow;
+ struct nf_flowtable *nf_ft;
+};
+
+#define zone_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
+ .moffset = 0,\
+ .mlen = 2,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_2) + 2,\
+}
+
+#define ctstate_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
+ .moffset = 2,\
+ .mlen = 2,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_2),\
+}
+
+#define mark_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_3,\
+ .moffset = 0,\
+ .mlen = 4,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_3),\
+}
+
+#define labels_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_4,\
+ .moffset = 0,\
+ .mlen = 4,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_4),\
+}
+
+#define fteid_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\
+ .moffset = 0,\
+ .mlen = 4,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_5),\
+}
+
+#define tupleid_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
+ .moffset = 0,\
+ .mlen = 3,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_1),\
+}
+
+#define TUPLE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[TUPLEID_TO_REG].mlen * 8)
+#define TUPLE_ID_MAX GENMASK(TUPLE_ID_BITS - 1, 0)
+
+#if IS_ENABLED(CONFIG_MLX5_TC_CT)
+
+int
+mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv);
+void
+mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv);
+
+int
+mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct netlink_ext_ack *extack);
+int
+mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack);
+
+struct mlx5_flow_handle *
+mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+void
+mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *attr);
+
+bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid);
+
+#else /* CONFIG_MLX5_TC_CT */
+
+static inline int
+mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+ return 0;
+}
+
+static inline void
+mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+}
+
+static inline int
+mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct mlx5_flow_handle *
+mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void
+mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *attr)
+{
+}
+
+static inline bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid)
+{
+ if (!tupleid)
+ return true;
+
+ return false;
+}
+
+#endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */
+#endif /* __MLX5_EN_TC_CT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index af4ebd2951b5..608d0e07c308 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -469,10 +469,15 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
- void *headers_c,
- void *headers_v, u8 *match_level)
+ u8 *match_level)
{
struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+ struct netlink_ext_ack *extack = f->common.extack;
int err = 0;
if (!tunnel) {
@@ -499,6 +504,109 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
goto out;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_match_control match;
+ u16 addr_type;
+
+ flow_rule_match_enc_control(rule, &match);
+ addr_type = match.key->addr_type;
+
+ /* Tunnel addr_type uses the same key IDs as non-tunnel */
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4,
+ ntohl(match.mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4,
+ ntohl(match.key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+ ntohl(match.mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+ ntohl(match.key->dst));
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ETH_P_IP);
+ } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_enc_ipv6_addrs(rule, &match);
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ETH_P_IPV6);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_enc_ip(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
+ match.mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
+ match.key->tos & 0x3);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
+ match.mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
+ match.key->tos >> 2);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
+ match.mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
+ match.key->ttl);
+
+ if (match.mask->ttl &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB
+ (priv->mdev,
+ ft_field_support.outer_ipv4_ttl)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL is not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ /* Enforce DMAC when offloading incoming tunneled flows.
+ * Flow counters require a match on the DMAC.
+ */
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dmac_47_16), priv->netdev->dev_addr);
+
+ /* let software handle IP fragments */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
+
+ return 0;
+
out:
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 6f9a78c85ffd..1630f0ec3ad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -76,8 +76,7 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
- void *headers_c,
- void *headers_v, u8 *match_level);
+ u8 *match_level);
int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 06f6f08ff5eb..6d703ddee4e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -773,6 +773,7 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings
static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper, bool force_legacy,
+ u16 data_rate_oper,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -784,7 +785,10 @@ static void get_speed_duplex(struct net_device *netdev,
speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
if (!speed) {
- speed = SPEED_UNKNOWN;
+ if (data_rate_oper)
+ speed = 100 * data_rate_oper;
+ else
+ speed = SPEED_UNKNOWN;
goto out;
}
@@ -873,17 +877,18 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
- u32 rx_pause = 0;
- u32 tx_pause = 0;
- u32 eth_proto_cap;
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
u32 eth_proto_admin;
- u32 eth_proto_lp;
- u32 eth_proto_oper;
u8 an_disable_admin;
- u8 an_status;
+ u16 data_rate_oper;
+ u32 eth_proto_oper;
+ u32 eth_proto_cap;
u8 connector_type;
+ u32 rx_pause = 0;
+ u32 tx_pause = 0;
+ u32 eth_proto_lp;
bool admin_ext;
+ u8 an_status;
bool ext;
int err;
@@ -917,6 +922,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
an_status = MLX5_GET(ptys_reg, out, an_status);
connector_type = MLX5_GET(ptys_reg, out, connector_type);
+ data_rate_oper = MLX5_GET(ptys_reg, out, data_rate_oper);
mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
@@ -927,7 +933,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
- link_ksettings);
+ data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -1126,8 +1132,8 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rss_params *rss = &priv->rss_params;
@@ -1146,8 +1152,8 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
-static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rss_params *rss = &priv->rss_params;
@@ -1942,7 +1948,8 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -1959,12 +1966,15 @@ static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u
return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
}
-static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
return mlx5e_ethtool_set_rxnfc(dev, cmd);
}
const struct ethtool_ops mlx5e_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_get_strings,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a230df2a45aa..f9c928afec89 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -63,6 +63,7 @@
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"
+#include "en/devlink.h"
#include "lib/mlx5.h"
@@ -4605,6 +4606,7 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
#endif
+ .ndo_get_devlink_port = mlx5e_get_devlink_port,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -5471,11 +5473,19 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
goto err_detach;
}
+ err = mlx5e_devlink_port_register(netdev);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_devlink_phy_port_register failed, %d\n", err);
+ goto err_unregister_netdev;
+ }
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
#endif
return priv;
+err_unregister_netdev:
+ unregister_netdev(netdev);
err_detach:
mlx5e_detach(mdev, priv);
err_destroy_netdev:
@@ -5497,6 +5507,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
#endif
+ mlx5e_devlink_port_unregister(priv);
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 5df8f50b76e7..a33d15156ed5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -192,7 +192,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
if (err) {
- pr_warn("vport %d error %d reading stats\n", rep->vport, err);
+ netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
+ rep->vport, err);
return;
}
@@ -252,25 +253,6 @@ static int mlx5e_rep_set_ringparam(struct net_device *dev,
return mlx5e_ethtool_set_ringparam(priv, param);
}
-static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5_flow_handle *flow_rule;
-
- flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
- rep->vport,
- dest);
- if (IS_ERR(flow_rule))
- return PTR_ERR(flow_rule);
-
- mlx5_del_flow_rules(rpriv->vport_rx_rule);
- rpriv->vport_rx_rule = flow_rule;
- return 0;
-}
-
static void mlx5e_rep_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
@@ -283,33 +265,8 @@ static int mlx5e_rep_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- u16 curr_channels_amount = priv->channels.params.num_channels;
- u32 new_channels_amount = ch->combined_count;
- struct mlx5_flow_destination new_dest;
- int err = 0;
- err = mlx5e_ethtool_set_channels(priv, ch);
- if (err)
- return err;
-
- if (curr_channels_amount == 1 && new_channels_amount > 1) {
- new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- new_dest.ft = priv->fs.ttc.ft.t;
- } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
- new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- new_dest.tir_num = priv->direct_tir[0].tirn;
- } else {
- return 0;
- }
-
- err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
- if (err) {
- netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n",
- curr_channels_amount, new_channels_amount);
- return err;
- }
-
- return 0;
+ return mlx5e_ethtool_set_channels(priv, ch);
}
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
@@ -375,6 +332,9 @@ static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5e_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
@@ -391,6 +351,9 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
};
static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
@@ -406,6 +369,10 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
+ .get_rxfh = mlx5e_get_rxfh,
+ .set_rxfh = mlx5e_set_rxfh,
+ .get_rxnfc = mlx5e_get_rxnfc,
+ .set_rxnfc = mlx5e_set_rxnfc,
.get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
@@ -1422,7 +1389,7 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
return 0;
}
-static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
+static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1435,7 +1402,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_devlink_port = mlx5e_get_devlink_port,
+ .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -1448,7 +1415,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_start_xmit = mlx5e_xmit,
.ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_devlink_port = mlx5e_get_devlink_port,
+ .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -1464,6 +1431,11 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_set_features = mlx5e_set_features,
};
+bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
+{
+ return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
+}
+
bool mlx5e_eswitch_rep(struct net_device *netdev)
{
if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
@@ -1584,6 +1556,8 @@ static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct ttc_params ttc_params = {};
int tt, err;
@@ -1593,6 +1567,11 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
/* The inner_ttc in the ttc params is intentionally not set */
ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
mlx5e_set_ttc_ft_params(&ttc_params);
+
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ /* To give uplink rep TTC a lower level for chaining from root ft */
+ ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
+
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
@@ -1604,6 +1583,52 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
return 0;
}
+static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ int err = 0;
+
+ if (rep->vport != MLX5_VPORT_UPLINK) {
+ /* non-uplink reps will skip any bypass tables and go directly to
+ * their own ttc
+ */
+ rpriv->root_ft = priv->fs.ttc.ft.t;
+ return 0;
+ }
+
+ /* the uplink root ft is used to auto-chain to the ethtool or ttc tables */
+ ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
+ if (!ns) {
+ netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
+ ft_attr.prio = 1;
+ ft_attr.level = 1;
+
+ rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(rpriv->root_ft)) {
+ err = PTR_ERR(rpriv->root_ft);
+ rpriv->root_ft = NULL;
+ }
+
+ return err;
+}
+
+static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ return;
+ mlx5_destroy_flow_table(rpriv->root_ft);
+}
+
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -1612,11 +1637,10 @@ static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_destination dest;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest.tir_num = priv->direct_tir[0].tirn;
- flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
- rep->vport,
- &dest);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rpriv->root_ft;
+
+ flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
if (IS_ERR(flow_rule))
return PTR_ERR(flow_rule);
rpriv->vport_rx_rule = flow_rule;
@@ -1656,12 +1680,20 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_tirs;
- err = mlx5e_create_rep_vport_rx_rule(priv);
+ err = mlx5e_create_rep_root_ft(priv);
if (err)
goto err_destroy_ttc_table;
+ err = mlx5e_create_rep_vport_rx_rule(priv);
+ if (err)
+ goto err_destroy_root_ft;
+
+ mlx5e_ethtool_init_steering(priv);
+
return 0;
+err_destroy_root_ft:
+ mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
@@ -1682,6 +1714,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5_del_flow_rules(rpriv->vport_rx_rule);
+ mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
mlx5e_destroy_indirect_tirs(priv, false);
@@ -1920,7 +1953,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_update_ndo_stats,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
.max_tc = 1,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
@@ -1940,7 +1973,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_ul_rep_stats_grps,
@@ -2026,8 +2059,9 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
&mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
if (!netdev) {
- pr_warn("Failed to create representor netdev for vport %d\n",
- rep->vport);
+ mlx5_core_warn(dev,
+ "Failed to create representor netdev for vport %d\n",
+ rep->vport);
kfree(rpriv);
return -EINVAL;
}
@@ -2045,29 +2079,32 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
- pr_warn("Failed to attach representor netdev for vport %d\n",
- rep->vport);
+ netdev_warn(netdev,
+ "Failed to attach representor netdev for vport %d\n",
+ rep->vport);
goto err_destroy_mdev_resources;
}
err = mlx5e_rep_neigh_init(rpriv);
if (err) {
- pr_warn("Failed to initialized neighbours handling for vport %d\n",
- rep->vport);
+ netdev_warn(netdev,
+ "Failed to initialized neighbours handling for vport %d\n",
+ rep->vport);
goto err_detach_netdev;
}
err = register_devlink_port(dev, rpriv);
if (err) {
- esw_warn(dev, "Failed to register devlink port %d\n",
- rep->vport);
+ netdev_warn(netdev, "Failed to register devlink port %d\n",
+ rep->vport);
goto err_neigh_cleanup;
}
err = register_netdev(netdev);
if (err) {
- pr_warn("Failed to register representor netdev for vport %d\n",
- rep->vport);
+ netdev_warn(netdev,
+ "Failed to register representor netdev for vport %d\n",
+ rep->vport);
goto err_devlink_cleanup;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 3f756d51435f..6a2337900420 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -55,6 +55,7 @@ struct mlx5e_neigh_update_table {
unsigned long min_interval; /* jiffies */
};
+struct mlx5_tc_ct_priv;
struct mlx5_rep_uplink_priv {
/* Filters DB - instantiated by the uplink representor and shared by
* the uplink's VFs
@@ -81,12 +82,20 @@ struct mlx5_rep_uplink_priv {
struct mutex unready_flows_lock;
struct list_head unready_flows;
struct work_struct reoffload_flows_work;
+
+ /* maps tun_info to a unique id */
+ struct mapping_ctx *tunnel_mapping;
+ /* maps tun_enc_opts to a unique id */
+ struct mapping_ctx *tunnel_enc_opts_mapping;
+
+ struct mlx5_tc_ct_priv *ct_priv;
};
struct mlx5e_rep_priv {
struct mlx5_eswitch_rep *rep;
struct mlx5e_neigh_update_table neigh_update;
struct net_device *netdev;
+ struct mlx5_flow_table *root_ft;
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
@@ -191,6 +200,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe);
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e);
@@ -200,6 +211,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
bool mlx5e_eswitch_rep(struct net_device *netdev);
+bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
#else /* CONFIG_MLX5_ESWITCH */
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 065c74a2d0c5..57b24946fb74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1195,6 +1195,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5e_tc_update_priv tc_priv = {};
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
struct sk_buff *skb;
@@ -1227,13 +1228,78 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
+ if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ goto free_wqe;
+
napi_gro_receive(rq->cq.napi, skb);
+ mlx5_tc_rep_post_napi_receive(&tc_priv);
+
free_wqe:
mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
+
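+/* MPWQE flavor of the rep RX completion handler: same CQE/WQE handling as
+ * mlx5e_handle_rx_cqe_mpwrq(), plus mlx5e_tc_rep_update_skb() to restore TC
+ * chain/tunnel metadata on the skb before it is passed to napi_gro_receive().
+ */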
+void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe)
+{
+ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+ u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+ u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
+ u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
+ u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ struct mlx5e_tc_update_priv tc_priv = {};
+ struct mlx5e_rx_wqe_ll *wqe;
+ struct mlx5_wq_ll *wq;
+ struct sk_buff *skb;
+ u16 cqe_bcnt;
+
+ wi->consumed_strides += cstrides;
+
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
+ rq->stats->wqe_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->mpwqe_filler_cqes++;
+ stats->mpwqe_filler_strides += cstrides;
+ goto mpwrq_cqe_out;
+ }
+
+ cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+
+ skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
+ mlx5e_skb_from_cqe_mpwrq_linear,
+ mlx5e_skb_from_cqe_mpwrq_nonlinear,
+ rq, wi, cqe_bcnt, head_offset, page_idx);
+ if (!skb)
+ goto mpwrq_cqe_out;
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+ if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ goto mpwrq_cqe_out;
+
+ napi_gro_receive(rq->cq.napi, skb);
+
+ mlx5_tc_rep_post_napi_receive(&tc_priv);
+
+mpwrq_cqe_out:
+ if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+ return;
+
+ wq = &rq->mpwqe.wq;
+ wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
+ mlx5e_free_rx_mpwqe(rq, wi, true);
+ mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+}
#endif
struct sk_buff *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 74091f72c9a8..044891a03be3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -55,10 +55,14 @@
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
+#include "en/mapping.h"
+#include "en/tc_ct.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+
struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
@@ -84,6 +88,7 @@ enum {
MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
+ MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -134,6 +139,8 @@ struct mlx5e_tc_flow {
refcount_t refcnt;
struct rcu_head rcu_head;
struct completion init_done;
+ int tunnel_id; /* the mapped tunnel id of this flow */
+
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -144,15 +151,118 @@ struct mlx5e_tc_flow_parse_attr {
const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
- int num_mod_hdr_actions;
- int max_mod_hdr_actions;
- void *mod_hdr_actions;
+ struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
+struct tunnel_match_key {
+ struct flow_dissector_key_control enc_control;
+ struct flow_dissector_key_keyid enc_key_id;
+ struct flow_dissector_key_ports enc_tp;
+ struct flow_dissector_key_ip enc_ip;
+ union {
+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
+ };
+
+ int filter_ifindex;
+};
+
+/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
+ * Upper TUNNEL_INFO_BITS for general tunnel info.
+ * Lower ENC_OPTS_BITS bits for enc_opts.
+ */
+#define TUNNEL_INFO_BITS 6
+#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
+#define ENC_OPTS_BITS 2
+#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
+#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
+#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
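+/* Example: tun_id 5 with enc_opts_id 2 is packed as
+ * (5 << ENC_OPTS_BITS) | 2 = 0x16; it is unpacked with
+ * tun_id = id >> ENC_OPTS_BITS and enc_opts_id = id & ENC_OPTS_BITS_MASK.
+ */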
+
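+/* Each mapping describes a slice of a metadata register: mfield selects the
+ * register, moffset/mlen give the byte offset and width written by
+ * mlx5e_tc_match_to_reg_set(), and soffset locates the same bits in the FTE
+ * match param for mlx5e_tc_match_to_reg_match().
+ */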
+struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
+ [CHAIN_TO_REG] = {
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
+ .moffset = 0,
+ .mlen = 2,
+ },
+ [TUNNEL_TO_REG] = {
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
+ .moffset = 3,
+ .mlen = 1,
+ .soffset = MLX5_BYTE_OFF(fte_match_param,
+ misc_parameters_2.metadata_reg_c_1),
+ },
+ [ZONE_TO_REG] = zone_to_reg_ct,
+ [CTSTATE_TO_REG] = ctstate_to_reg_ct,
+ [MARK_TO_REG] = mark_to_reg_ct,
+ [LABELS_TO_REG] = labels_to_reg_ct,
+ [FTEID_TO_REG] = fteid_to_reg_ct,
+ [TUPLEID_TO_REG] = tupleid_to_reg_ct,
+};
+
+static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
+
+void
+mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data,
+ u32 mask)
+{
+ int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
+ int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+ void *headers_c = spec->match_criteria;
+ void *headers_v = spec->match_value;
+ void *fmask, *fval;
+
+ fmask = headers_c + soffset;
+ fval = headers_v + soffset;
+
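+ /* Keep only the match_len significant bytes of the (big-endian) value
+ * and mask, so the memcpy below writes exactly the matched register
+ * slice.
+ */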
+ mask = cpu_to_be32(mask) >> (32 - (match_len * 8));
+ data = cpu_to_be32(data) >> (32 - (match_len * 8));
+
+ memcpy(fmask, &mask, match_len);
+ memcpy(fval, &data, match_len);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+}
+
+int
+mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data)
+{
+ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
+ int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
+ int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+ char *modact;
+ int err;
+
+ err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr_acts);
+ if (err)
+ return err;
+
+ modact = mod_hdr_acts->actions +
+ (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
+
+ /* Firmware has a 5-bit length field, where 0 means 32 bits */
+ if (mlen == 4)
+ mlen = 0;
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, field, mfield);
+ MLX5_SET(set_action_in, modact, offset, moffset * 8);
+ MLX5_SET(set_action_in, modact, length, mlen * 8);
+ MLX5_SET(set_action_in, modact, data, data);
+ mod_hdr_acts->num_actions++;
+
+ return 0;
+}
+
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
@@ -210,8 +320,6 @@ struct mlx5e_mod_hdr_entry {
int compl_result;
};
-#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
-
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
@@ -361,10 +469,10 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mod_hdr_key key;
u32 hash_key;
- num_actions = parse_attr->num_mod_hdr_actions;
+ num_actions = parse_attr->mod_hdr_acts.num_actions;
actions_size = MLX5_MH_ACT_SZ * num_actions;
- key.actions = parse_attr->mod_hdr_actions;
+ key.actions = parse_attr->mod_hdr_acts.actions;
key.num_actions = num_actions;
hash_key = hash_mod_hdr_info(&key);
@@ -954,7 +1062,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
flow_act.modify_hdr = attr->modify_hdr;
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
@@ -1043,8 +1151,16 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
+ if (flow_flag_test(flow, CT)) {
+ mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
+
+ return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
+ mod_hdr_acts);
+ }
+
rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
if (IS_ERR(rule))
return rule;
@@ -1063,10 +1179,15 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_esw_flow_attr *attr)
{
flow_flag_clear(flow, OFFLOADED);
+ if (flow_flag_test(flow, CT)) {
+ mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
+ return;
+ }
+
if (attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -1076,17 +1197,17 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
- struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *slow_attr)
+ struct mlx5_flow_spec *spec)
{
+ struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
- memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
- slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
+ slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr.split_count = 0;
+ slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
if (!IS_ERR(rule))
flow_flag_set(flow, SLOW);
@@ -1095,14 +1216,15 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
- struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *slow_attr)
+ struct mlx5e_tc_flow *flow)
{
- memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
- slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
+ struct mlx5_esw_flow_attr slow_attr;
+
+ memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
+ slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr.split_count = 0;
+ slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
flow_flag_clear(flow, SLOW);
}
@@ -1173,7 +1295,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int out_index;
if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
- NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
}
@@ -1184,13 +1307,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
*/
max_chain = mlx5_esw_chains_get_chain_range(esw);
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
- NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested chain is out of supported range");
return -EOPNOTSUPP;
}
max_prio = mlx5_esw_chains_get_prio_range(esw);
if (attr->prio > max_prio) {
- NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested priority is out of supported range");
return -EOPNOTSUPP;
}
@@ -1220,7 +1345,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
@@ -1237,14 +1362,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
*/
- if (!encap_valid) {
- /* continue with goto slow path rule instead */
- struct mlx5_esw_flow_attr slow_attr;
-
- flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
- } else {
+ if (!encap_valid)
+ flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
+ else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
- }
if (IS_ERR(flow->rule[0]))
return PTR_ERR(flow->rule[0]);
@@ -1272,9 +1393,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct mlx5_esw_flow_attr slow_attr;
int out_index;
+ mlx5e_put_flow_tunnel_id(flow);
+
if (flow_flag_test(flow, NOT_READY)) {
remove_unready_flow(flow);
kvfree(attr->parse_attr);
@@ -1283,7 +1405,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, SLOW))
- mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+ mlx5e_tc_unoffload_from_slow_path(esw, flow);
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
@@ -1312,7 +1434,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr slow_attr, *esw_attr;
+ struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
@@ -1365,7 +1487,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
continue;
}
- mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+ mlx5e_tc_unoffload_from_slow_path(esw, flow);
flow->rule[0] = rule;
/* was unset when slow path rule removed */
flow_flag_set(flow, OFFLOADED);
@@ -1377,7 +1499,6 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
@@ -1389,7 +1510,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
spec = &flow->esw_attr->parse_attr->spec;
/* update from encap rule to slow path rule */
- rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
+ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
/* mark the flow's encap dest as non-valid */
flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
@@ -1664,150 +1785,272 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
}
}
+static bool flow_has_tc_fwd_action(struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_action *flow_action = &rule->action;
+ const struct flow_action_entry *act;
+ int i;
-static int parse_tunnel_attr(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct flow_cls_offload *f,
- struct net_device *filter_dev, u8 *match_level)
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_GOTO:
+ return true;
+ default:
+ continue;
+ }
+ }
+
+ return false;
+}
+
+static int
+enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
+ struct flow_dissector_key_enc_opts *opts,
+ struct netlink_ext_ack *extack,
+ bool *dont_care)
+{
+ struct geneve_opt *opt;
+ int off = 0;
+
+ *dont_care = true;
+
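+ /* Walk the options mask: an all-zero option is "don't care"; anything
+ * else must be a full match (class/type/data all ones), since only
+ * exact option keys are stored in the enc_opts mapping.
+ */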
+ while (opts->len > off) {
+ opt = (struct geneve_opt *)&opts->data[off];
+
+ if (!(*dont_care) || opt->opt_class || opt->type ||
+ memchr_inv(opt->opt_data, 0, opt->length * 4)) {
+ *dont_care = false;
+
+ if (opt->opt_class != U16_MAX ||
+ opt->type != U8_MAX ||
+ memchr_inv(opt->opt_data, 0xFF,
+ opt->length * 4)) {
+ NL_SET_ERR_MSG(extack,
+ "Partial match of tunnel options in chain > 0 isn't supported");
+ netdev_warn(priv->netdev,
+ "Partial match of tunnel options in chain > 0 isn't supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ off += sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ return 0;
+}
+
+#define COPY_DISSECTOR(rule, diss_key, dst)\
+({ \
+ struct flow_rule *__rule = (rule);\
+ typeof(dst) __dst = dst;\
+\
+ memcpy(__dst,\
+ skb_flow_dissector_target(__rule->match.dissector,\
+ diss_key,\
+ __rule->match.key),\
+ sizeof(*__dst));\
+})
+
+static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct flow_cls_offload *f,
+ struct net_device *filter_dev)
{
- struct netlink_ext_ack *extack = f->common.extack;
- void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers);
- void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
+ struct flow_match_enc_opts enc_opts_match;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct tunnel_match_key tunnel_key;
+ bool enc_opts_is_dont_care = true;
+ u32 tun_id, enc_opts_id = 0;
+ struct mlx5_eswitch *esw;
+ u32 value, mask;
int err;
- err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
- headers_c, headers_v, match_level);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "failed to parse tunnel attributes");
+ esw = priv->mdev->priv.eswitch;
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ memset(&tunnel_key, 0, sizeof(tunnel_key));
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ &tunnel_key.enc_control);
+ if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ &tunnel_key.enc_ipv4);
+ else
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
+ &tunnel_key.enc_ipv6);
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
+ &tunnel_key.enc_tp);
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
+ &tunnel_key.enc_key_id);
+ tunnel_key.filter_ifindex = filter_dev->ifindex;
+
+ err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
+ if (err)
return err;
- }
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
- struct flow_match_control match;
- u16 addr_type;
+ flow_rule_match_enc_opts(rule, &enc_opts_match);
+ err = enc_opts_is_dont_care_or_full_match(priv,
+ enc_opts_match.mask,
+ extack,
+ &enc_opts_is_dont_care);
+ if (err)
+ goto err_enc_opts;
- flow_rule_match_enc_control(rule, &match);
- addr_type = match.key->addr_type;
+ if (!enc_opts_is_dont_care) {
+ err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_match.key, &enc_opts_id);
+ if (err)
+ goto err_enc_opts;
+ }
- /* For tunnel addr_type used same key id`s as for non-tunnel */
- if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_match_ipv4_addrs match;
+ value = tun_id << ENC_OPTS_BITS | enc_opts_id;
+ mask = enc_opts_id ? TUNNEL_ID_MASK :
+ (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
- flow_rule_match_enc_ipv4_addrs(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- src_ipv4_src_ipv6.ipv4_layout.ipv4,
- ntohl(match.mask->src));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- src_ipv4_src_ipv6.ipv4_layout.ipv4,
- ntohl(match.key->src));
+ if (attr->chain) {
+ mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
+ TUNNEL_TO_REG, value, mask);
+ } else {
+ mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
+ err = mlx5e_tc_match_to_reg_set(priv->mdev,
+ mod_hdr_acts,
+ TUNNEL_TO_REG, value);
+ if (err)
+ goto err_set;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(match.mask->dst));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(match.key->dst));
-
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
- ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ETH_P_IP);
- } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_match_ipv6_addrs match;
-
- flow_rule_match_enc_ipv6_addrs(rule, &match);
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
-
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
-
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
- ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ETH_P_IPV6);
- }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
- struct flow_match_ip match;
+ flow->tunnel_id = value;
+ return 0;
- flow_rule_match_enc_ip(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
- match.mask->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
- match.key->tos & 0x3);
+err_set:
+ if (enc_opts_id)
+ mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id);
+err_enc_opts:
+ mapping_remove(uplink_priv->tunnel_mapping, tun_id);
+ return err;
+}
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
- match.mask->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
- match.key->tos >> 2);
+static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
+{
+ u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
+ u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5_eswitch *esw;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
- match.mask->ttl);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
- match.key->ttl);
+ esw = flow->priv->mdev->priv.eswitch;
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ if (tun_id)
+ mapping_remove(uplink_priv->tunnel_mapping, tun_id);
+ if (enc_opts_id)
+ mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id);
+}
- if (match.mask->ttl &&
- !MLX5_CAP_ESW_FLOWTABLE_FDB
- (priv->mdev,
- ft_field_support.outer_ipv4_ttl)) {
+u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
+{
+ return flow->tunnel_id;
+}
+
+static int parse_tunnel_attr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct net_device *filter_dev,
+ u8 *match_level,
+ bool *match_inner)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct netlink_ext_ack *extack = f->common.extack;
+ bool needs_mapping, sets_mapping;
+ int err;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return -EOPNOTSUPP;
+
+ needs_mapping = !!flow->esw_attr->chain;
+ sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
+ *match_inner = !needs_mapping;
+
+ if ((needs_mapping || sets_mapping) &&
+ !mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ NL_SET_ERR_MSG(extack,
+ "Chains on tunnel devices isn't supported without register metadata support");
+ netdev_warn(priv->netdev,
+ "Chains on tunnel devices isn't supported without register metadata support");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow->esw_attr->chain) {
+ err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
+ match_level);
+ if (err) {
NL_SET_ERR_MSG_MOD(extack,
- "Matching on TTL is not supported");
- return -EOPNOTSUPP;
+ "Failed to parse tunnel attributes");
+ netdev_warn(priv->netdev,
+ "Failed to parse tunnel attributes");
+ return err;
}
+ flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
}
- /* Enforce DMAC when offloading incoming tunneled flows.
- * Flow counters require a match on the DMAC.
- */
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- dmac_47_16), priv->netdev->dev_addr);
+ if (!needs_mapping && !sets_mapping)
+ return 0;
- /* let software handle IP fragments */
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
+ return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
+}
- return 0;
+static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
+{
+ return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ inner_headers);
}
-static void *get_match_headers_criteria(u32 flags,
- struct mlx5_flow_spec *spec)
+static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
- return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
- MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- inner_headers) :
- MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers);
+ return MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ inner_headers);
+}
+
+static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
+{
+ return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+}
+
+static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
+{
+ return MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
}
static void *get_match_headers_value(u32 flags,
struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
- MLX5_ADDR_OF(fte_match_param, spec->match_value,
- inner_headers) :
- MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers);
+ get_match_inner_headers_value(spec) :
+ get_match_outer_headers_value(spec);
+}
+
+static void *get_match_headers_criteria(u32 flags,
+ struct mlx5_flow_spec *spec)
+{
+ return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
+ get_match_inner_headers_criteria(spec) :
+ get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
@@ -1845,6 +2088,7 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct net_device *filter_dev,
@@ -1885,6 +2129,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_CT) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
@@ -1894,18 +2139,22 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
if (mlx5e_get_tc_tun(filter_dev)) {
- if (parse_tunnel_attr(priv, spec, f, filter_dev,
- outer_match_level))
- return -EOPNOTSUPP;
+ bool match_inner = false;
- /* At this point, header pointers should point to the inner
- * headers, outer header were already set by parse_tunnel_attr
- */
- match_level = inner_match_level;
- headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
- spec);
- headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
- spec);
+ err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
+ outer_match_level, &match_inner);
+ if (err)
+ return err;
+
+ if (match_inner) {
+ /* header pointers should point to the inner headers
+ * if the packet was decapsulated already.
+ * outer headers are set by parse_tunnel_attr.
+ */
+ match_level = inner_match_level;
+ headers_c = get_match_inner_headers_criteria(spec);
+ headers_v = get_match_inner_headers_value(spec);
+ }
}
err = mlx5e_flower_parse_meta(filter_dev, f);
@@ -2222,8 +2471,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
inner_match_level = MLX5_MATCH_NONE;
outer_match_level = MLX5_MATCH_NONE;
- err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
- &outer_match_level);
+ err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
+ &inner_match_level, &outer_match_level);
non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
outer_match_level : inner_match_level;
@@ -2383,25 +2632,26 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
-/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
- * max from the SW pedit action. On success, attr->num_mod_hdr_actions
- * says how many HW actions were actually parsed.
- */
-static int offload_pedit_fields(struct pedit_headers_action *hdrs,
+static int offload_pedit_fields(struct mlx5e_priv *priv,
+ int namespace,
+ struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow_parse_attr *parse_attr,
u32 *action_flags,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- int i, action_size, nactions, max_actions, first, last, next_z;
+ int i, action_size, first, last, next_z;
void *headers_c, *headers_v, *action, *vals_p;
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
struct mlx5_fields *f;
unsigned long mask;
__be32 mask_be32;
__be16 mask_be16;
+ int err;
u8 cmd;
+ mod_acts = &parse_attr->mod_hdr_acts;
headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
@@ -2411,11 +2661,6 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
add_vals = &hdrs[1].vals;
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
- action = parse_attr->mod_hdr_actions +
- parse_attr->num_mod_hdr_actions * action_size;
-
- max_actions = parse_attr->max_mod_hdr_actions;
- nactions = parse_attr->num_mod_hdr_actions;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
@@ -2441,13 +2686,6 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
return -EOPNOTSUPP;
}
- if (nactions == max_actions) {
- NL_SET_ERR_MSG_MOD(extack,
- "too many pedit actions, can't offload");
- printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
- return -EOPNOTSUPP;
- }
-
skip = false;
if (s_mask) {
void *match_mask = headers_c + f->match_offset;
@@ -2494,6 +2732,18 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
return -EOPNOTSUPP;
}
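+ /* The mod header actions buffer grows on demand; reserve room for
+ * one more HW action before writing it.
+ */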
+ err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "too many pedit actions, can't offload");
+ mlx5_core_warn(priv->mdev,
+ "mlx5: parsed %d pedit actions, can't do more\n",
+ mod_acts->num_actions);
+ return err;
+ }
+
+ action = mod_acts->actions +
+ (mod_acts->num_actions * action_size);
MLX5_SET(set_action_in, action, action_type, cmd);
MLX5_SET(set_action_in, action, field, f->field);
@@ -2516,11 +2766,9 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
else if (f->field_bsize == 8)
MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
- action += action_size;
- nactions++;
+ ++mod_acts->num_actions;
}
- parse_attr->num_mod_hdr_actions = nactions;
return 0;
}
@@ -2533,34 +2781,52 @@ static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
}
-static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
- struct pedit_headers_action *hdrs,
- int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
+ int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
- int nkeys, action_size, max_actions;
+ int action_size, new_num_actions, max_hw_actions;
+ size_t new_sz, old_sz;
+ void *ret;
- nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits +
- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
+ return 0;
- max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace);
- /* can get up to crazingly 16 HW actions in 32 bits pedit SW key */
- max_actions = min(max_actions, nkeys * 16);
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
- parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
- if (!parse_attr->mod_hdr_actions)
+ max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
+ namespace);
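+ /* Grow geometrically: 1, 2, 4, ... actions, capped at the HW limit
+ * for this namespace; once the cap is hit, fail with -ENOSPC below.
+ */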
+ new_num_actions = min(max_hw_actions,
+ mod_hdr_acts->actions ?
+ mod_hdr_acts->max_actions * 2 : 1);
+ if (mod_hdr_acts->max_actions == new_num_actions)
+ return -ENOSPC;
+
+ new_sz = action_size * new_num_actions;
+ old_sz = mod_hdr_acts->max_actions * action_size;
+ ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
+ if (!ret)
return -ENOMEM;
- parse_attr->max_mod_hdr_actions = max_actions;
+ memset(ret + old_sz, 0, new_sz - old_sz);
+ mod_hdr_acts->actions = ret;
+ mod_hdr_acts->max_actions = new_num_actions;
+
return 0;
}
+void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ kfree(mod_hdr_acts->actions);
+ mod_hdr_acts->actions = NULL;
+ mod_hdr_acts->num_actions = 0;
+ mod_hdr_acts->max_actions = 0;
+}
+
static const struct pedit_headers zero_masks = {};
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
@@ -2608,13 +2874,8 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
int err;
u8 cmd;
- if (!parse_attr->mod_hdr_actions) {
- err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
- if (err)
- goto out_err;
- }
-
- err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack);
+ err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
+ action_flags, extack);
if (err < 0)
goto out_dealloc_parsed_actions;
@@ -2634,8 +2895,7 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
return 0;
out_dealloc_parsed_actions:
- kfree(parse_attr->mod_hdr_actions);
-out_err:
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
return err;
}
@@ -2680,7 +2940,9 @@ struct ipv6_hoplimit_word {
__u8 hop_limit;
};
-static bool is_action_keys_supported(const struct flow_action_entry *act)
+static int is_action_keys_supported(const struct flow_action_entry *act,
+ bool ct_flow, bool *modify_ip_header,
+ struct netlink_ext_ack *extack)
{
u32 mask, offset;
u8 htype;
@@ -2699,7 +2961,13 @@ static bool is_action_keys_supported(const struct flow_action_entry *act)
if (offset != offsetof(struct iphdr, ttl) ||
ttl_word->protocol ||
ttl_word->check) {
- return true;
+ *modify_ip_header = true;
+ }
+
+ if (ct_flow && offset >= offsetof(struct iphdr, saddr)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of ipv4 address with action ct");
+ return -EOPNOTSUPP;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
struct ipv6_hoplimit_word *hoplimit_word =
@@ -2708,15 +2976,27 @@ static bool is_action_keys_supported(const struct flow_action_entry *act)
if (offset != offsetof(struct ipv6hdr, payload_len) ||
hoplimit_word->payload_len ||
hoplimit_word->nexthdr) {
- return true;
+ *modify_ip_header = true;
+ }
+
+ if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of ipv6 address with action ct");
+ return -EOPNOTSUPP;
}
+ } else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
+ htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of transport header ports with action ct");
+ return -EOPNOTSUPP;
}
- return false;
+
+ return 0;
}
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
struct flow_action *flow_action,
- u32 actions,
+ u32 actions, bool ct_flow,
struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
@@ -2724,7 +3004,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
void *headers_v;
u16 ethertype;
u8 ip_proto;
- int i;
+ int i, err;
headers_v = get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
@@ -2739,10 +3019,10 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
act->id != FLOW_ACTION_ADD)
continue;
- if (is_action_keys_supported(act)) {
- modify_ip_header = true;
- break;
- }
+ err = is_action_keys_supported(act, ct_flow,
+ &modify_ip_header, extack);
+ if (err)
+ return false;
}
ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
@@ -2764,23 +3044,42 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
+ struct net_device *filter_dev = parse_attr->filter_dev;
+ bool drop_action, pop_action, ct_flow;
u32 actions;
- if (mlx5e_is_eswitch_flow(flow))
+ ct_flow = flow_flag_test(flow, CT);
+ if (mlx5e_is_eswitch_flow(flow)) {
actions = flow->esw_attr->action;
- else
+
+ if (flow->esw_attr->split_count && ct_flow) {
+ /* All registers used by ct are cleared when using
+ * split rules.
+ */
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't offload mirroring with action ct");
+ return false;
+ }
+ } else {
actions = flow->nic_attr->action;
+ }
- if (flow_flag_test(flow, EGRESS) &&
- !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
- (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
- (actions & MLX5_FLOW_CONTEXT_ACTION_DROP)))
- return false;
+ drop_action = actions & MLX5_FLOW_CONTEXT_ACTION_DROP;
+ pop_action = actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+
+ if (flow_flag_test(flow, EGRESS) && !drop_action) {
+ /* We only support filters on a tunnel device, or on vlan
+ * devices if they also have a pop/drop action
+ */
+ if (!mlx5e_get_tc_tun(filter_dev) ||
+ (is_vlan_dev(filter_dev) && !pop_action))
+ return false;
+ }
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
return modify_header_match_supported(&parse_attr->spec,
flow_action, actions,
- extack);
+ ct_flow, extack);
return true;
}
@@ -2836,8 +3135,7 @@ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
return -EOPNOTSUPP;
}
- err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
- hdrs, NULL);
+ err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
@@ -2882,6 +3180,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (!flow_action_has_entries(flow_action))
return -EINVAL;
+ if (!flow_action_hw_stats_types_check(flow_action, extack,
+ FLOW_ACTION_HW_STATS_TYPE_DELAYED_BIT))
+ return -EOPNOTSUPP;
+
attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
flow_action_for_each(i, act, flow_action) {
@@ -2899,7 +3201,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, extack);
+ hdrs, extack);
if (err)
return err;
@@ -2968,9 +3270,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
/* in case all pedit actions are skipped, remove the MOD_HDR
* flag.
*/
- if (parse_attr->num_mod_hdr_actions == 0) {
+ if (parse_attr->mod_hdr_acts.num_actions == 0) {
action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
}
}
@@ -3313,6 +3615,45 @@ static bool is_duplicated_output_device(struct net_device *dev,
return false;
}
+static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ const struct flow_action_entry *act,
+ u32 actions,
+ struct netlink_ext_ack *extack)
+{
+ u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ bool ft_flow = mlx5e_is_ft_flow(flow);
+ u32 dest_chain = act->chain_index;
+
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_esw_chains_backwards_supported(esw) &&
+ dest_chain <= attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto lower numbered chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+
+ if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto chain is not allowed if action has reformat or decap");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
@@ -3327,13 +3668,17 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
+ bool encap = false, decap = false;
+ u32 action = attr->action;
int err, i, if_count = 0;
- bool encap = false;
- u32 action = 0;
if (!flow_action_has_entries(flow_action))
return -EINVAL;
+ if (!flow_action_hw_stats_types_check(flow_action, extack,
+ FLOW_ACTION_HW_STATS_TYPE_DELAYED_BIT))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -3343,7 +3688,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, hdrs, extack);
+ hdrs, extack);
if (err)
return err;
@@ -3381,8 +3726,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
- pr_err("can't support more than %d output ports, can't offload forwarding\n",
- attr->out_count);
+ netdev_warn(priv->netdev,
+ "can't support more than %d output ports, can't offload forwarding\n",
+ attr->out_count);
return -EOPNOTSUPP;
}
@@ -3405,6 +3751,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
struct net_device *uplink_upper;
+ struct mlx5e_rep_priv *rep_priv;
if (is_duplicated_output_device(priv->netdev,
out_dev,
@@ -3440,11 +3787,29 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return err;
}
+ /* Don't allow forwarding between uplink ports.
+ *
+ * The input vport was stored in esw_attr->in_rep.
+ * In the LAG case, *priv* is the private data of
+ * the uplink, which may not be the input vport.
+ */
+ rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
+ if (mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
+ mlx5e_eswitch_uplink_rep(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are both uplink, can't offload forwarding");
+ pr_err("devices %s %s are both uplink, can't offload forwarding\n",
+ priv->netdev->name, out_dev->name);
+ return -EOPNOTSUPP;
+ }
+
if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
- pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
+ netdev_warn(priv->netdev,
+ "devices %s %s not on same switch HW, can't offload forwarding\n",
+ priv->netdev->name,
+ out_dev->name);
return -EOPNOTSUPP;
}
@@ -3463,8 +3828,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
} else {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
- pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
+ netdev_warn(priv->netdev,
+ "devices %s %s not on same switch HW, can't offload forwarding\n",
+ priv->netdev->name,
+ out_dev->name);
return -EINVAL;
}
}
@@ -3506,28 +3873,24 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->split_count = attr->out_count;
break;
case FLOW_ACTION_TUNNEL_DECAP:
- action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ decap = true;
break;
- case FLOW_ACTION_GOTO: {
- u32 dest_chain = act->chain_index;
- u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
+ case FLOW_ACTION_GOTO:
+ err = mlx5_validate_goto_chain(esw, flow, act, action,
+ extack);
+ if (err)
+ return err;
- if (ft_flow) {
- NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
- return -EOPNOTSUPP;
- }
- if (dest_chain <= attr->chain) {
- NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
- return -EOPNOTSUPP;
- }
- if (dest_chain > max_chain) {
- NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
- return -EOPNOTSUPP;
- }
action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->dest_chain = dest_chain;
+ attr->dest_chain = act->chain_index;
+ break;
+ case FLOW_ACTION_CT:
+ err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
+ if (err)
+ return err;
+
+ flow_flag_set(flow, CT);
break;
- }
default:
NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
return -EOPNOTSUPP;
@@ -3556,9 +3919,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
* flag. we might have set split_count either by pedit or
* pop/push. if there is no pop/push either, reset it too.
*/
- if (parse_attr->num_mod_hdr_actions == 0) {
+ if (parse_attr->mod_hdr_acts.num_actions == 0) {
action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
(action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
attr->split_count = 0;
@@ -3570,8 +3933,25 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
if (attr->dest_chain) {
+ if (decap) {
+ /* This could be supported by creating a mapping for the
+ * tunnel device alone (without the tunnel headers) and
+ * associating that tunnel id with this decap flow.
+ *
+ * On restore (miss), we would then just set the saved
+ * tunnel device.
+ */
+
+ NL_SET_ERR_MSG(extack,
+ "Decap with goto isn't supported");
+ netdev_warn(priv->netdev,
+ "Decap with goto isn't supported");
+ return -EOPNOTSUPP;
+ }
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirroring goto chain rules isn't supported");
return -EOPNOTSUPP;
}
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
@@ -3579,7 +3959,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!(attr->action &
(MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rule must have at least one forward/drop action");
return -EOPNOTSUPP;
}
@@ -3750,6 +4131,10 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
+ err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
+ if (err)
+ goto err_free;
+
err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
complete_all(&flow->init_done);
if (err) {
@@ -4034,7 +4419,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
goto errout;
}
- if (mlx5e_is_offloaded_flow(flow)) {
+ if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
counter = mlx5e_tc_get_counter(flow);
if (!counter)
goto errout;
@@ -4125,6 +4510,9 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (!flow_action_basic_hw_stats_types_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
@@ -4294,12 +4682,63 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
- return rhashtable_init(tc_ht, &tc_ht_params);
+ const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts);
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *priv;
+ struct mapping_ctx *mapping;
+ int err;
+
+ uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
+ priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+
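+ /* Init order matters: the CT state and both tunnel mappings must
+ * exist before any flow can be added to tc_ht; errors unwind in
+ * reverse order below.
+ */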
+ err = mlx5_tc_ct_init(uplink_priv);
+ if (err)
+ goto err_ct;
+
+ mapping = mapping_create(sizeof(struct tunnel_match_key),
+ TUNNEL_INFO_BITS_MASK, true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto err_tun_mapping;
+ }
+ uplink_priv->tunnel_mapping = mapping;
+
+ mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto err_enc_opts_mapping;
+ }
+ uplink_priv->tunnel_enc_opts_mapping = mapping;
+
+ err = rhashtable_init(tc_ht, &tc_ht_params);
+ if (err)
+ goto err_ht_init;
+
+ return err;
+
+err_ht_init:
+ mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
+err_enc_opts_mapping:
+ mapping_destroy(uplink_priv->tunnel_mapping);
+err_tun_mapping:
+ mlx5_tc_ct_clean(uplink_priv);
+err_ct:
+ netdev_warn(priv->netdev,
+		    "Failed to initialize tc (eswitch), err: %d\n", err);
+ return err;
}
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
+ struct mlx5_rep_uplink_priv *uplink_priv;
+
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
+
+ uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
+ mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
+ mapping_destroy(uplink_priv->tunnel_mapping);
+
+ mlx5_tc_ct_clean(uplink_priv);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
@@ -4331,3 +4770,147 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
}
mutex_unlock(&rpriv->unready_flows_lock);
}
+
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
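+/* Rebuild the tunnel metadata for a packet that missed in hardware:
+ * look up the tunnel key and encap options saved in the mapping
+ * contexts, attach a metadata dst to the skb and redirect the skb to
+ * the original tunnel device.
+ */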
+static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv,
+ u32 tunnel_id)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct flow_dissector_key_enc_opts enc_opts = {};
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct metadata_dst *tun_dst;
+ struct tunnel_match_key key;
+ u32 tun_id, enc_opts_id;
+ struct net_device *dev;
+ int err;
+
+ enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
+ tun_id = tunnel_id >> ENC_OPTS_BITS;
+
+ if (!tun_id)
+ return true;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
+ if (err) {
+ WARN_ON_ONCE(true);
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel for tun_id: %d, err: %d\n",
+ tun_id, err);
+ return false;
+ }
+
+ if (enc_opts_id) {
+ err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id, &enc_opts);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
+ enc_opts_id, err);
+ return false;
+ }
+ }
+
+ tun_dst = tun_rx_dst(enc_opts.len);
+ if (!tun_dst) {
+ WARN_ON_ONCE(true);
+ return false;
+ }
+
+ ip_tunnel_key_init(&tun_dst->u.tun_info.key,
+ key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ 0, /* label */
+ key.enc_tp.src, key.enc_tp.dst,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ TUNNEL_KEY);
+
+ if (enc_opts.len)
+ ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data,
+ enc_opts.len, enc_opts.dst_opt_type);
+
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+ dev = dev_get_by_index(&init_net, key.filter_ifindex);
+ if (!dev) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel device with ifindex: %d\n",
+ key.filter_ifindex);
+ return false;
+ }
+
+	/* Save tun_dev so we can dev_put() it after the datapath */
+ tc_priv->tun_dev = dev;
+
+ skb->dev = dev;
+
+ return true;
+}
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
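+/* Restore the tc state for a packet received on a rep: reg_c0 carries
+ * the chain mapping id, reg_c1 carries the ct tuple id in its low bits
+ * and the tunnel mapping id above the tunnel register offset.
+ */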
+bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct tc_skb_ext *tc_skb_ext;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ int tunnel_moffset;
+ int err;
+
+ reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
+ if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
+ reg_c0 = 0;
+ reg_c1 = be32_to_cpu(cqe->imm_inval_pkey);
+
+ if (!reg_c0)
+ return true;
+
+ priv = netdev_priv(skb->dev);
+ esw = priv->mdev->priv.eswitch;
+
+ err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find chain for chain tag: %d, err: %d\n",
+ reg_c0, err);
+ return false;
+ }
+
+ if (chain) {
+ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (!tc_skb_ext) {
+ WARN_ON(1);
+ return false;
+ }
+
+ tc_skb_ext->chain = chain;
+
+ tuple_id = reg_c1 & TUPLE_ID_MAX;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
+ return false;
+ }
+
+ tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
+ tunnel_id = reg_c1 >> (8 * tunnel_moffset);
+ return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
+ return true;
+}
+
+void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
+{
+ if (tc_priv->tun_dev)
+ dev_put(tc_priv->tun_dev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 262cdb7b69b1..abdcfa4c4e0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -91,9 +91,63 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
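+/* Flow attributes carried through hardware metadata registers (reg_c):
+ * the tc chain, tunnel mapping id, connection tracking state, zone,
+ * mark, labels, fte id and ct tuple id.
+ */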
+enum mlx5e_tc_attr_to_reg {
+ CHAIN_TO_REG,
+ TUNNEL_TO_REG,
+ CTSTATE_TO_REG,
+ ZONE_TO_REG,
+ MARK_TO_REG,
+ LABELS_TO_REG,
+ FTEID_TO_REG,
+ TUPLEID_TO_REG,
+};
+
+struct mlx5e_tc_attr_to_reg_mapping {
+ int mfield; /* rewrite field */
+ int moffset; /* offset of mfield */
+ int mlen; /* bytes to rewrite/match */
+
+ int soffset; /* offset of spec for match */
+};
+
+extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
+
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev);
+struct mlx5e_tc_update_priv {
+ struct net_device *tun_dev;
+};
+
+bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv);
+
+void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);
+
+struct mlx5e_tc_mod_hdr_acts {
+ int num_actions;
+ int max_actions;
+ void *actions;
+};
+
+int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data);
+
+void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data,
+ u32 mask);
+
+int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
+ int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+
+struct mlx5e_tc_flow;
+u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
+
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index e49acd0c5da5..25640864c375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -39,6 +39,7 @@
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
+#include "devlink.h"
#include "ecpf.h"
enum {
@@ -2006,6 +2007,25 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
esw_disable_vport(esw, vport);
}
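+/* Read the fdb_large_groups devlink parameter, falling back to the
+ * default number of large flow groups if devlink can't provide it.
+ */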
+static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
+{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ &val);
+ if (!err) {
+ esw->params.large_group_num = val.vu32;
+ } else {
+ esw_warn(esw->dev,
+			 "Devlink can't get param fdb_large_groups, using default (%d).\n",
+ ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
+ esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
+ }
+}
+
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
{
int err;
@@ -2022,6 +2042,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
 		esw_warn(esw->dev, "egress ACL is not supported by FW\n");
+ mlx5_eswitch_get_devlink_param(esw);
+
esw_create_tsar(esw);
esw->mode = mode;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 4472710ccc9c..2e0417dd8ce3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -42,6 +42,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
+#include "en/tc_ct.h"
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
@@ -55,6 +56,8 @@
#ifdef CONFIG_MLX5_ESWITCH
+#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15
+
#define MLX5_MAX_UC_PER_VPORT(dev) \
(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
@@ -183,12 +186,22 @@ struct mlx5_eswitch_fdb {
int vlan_push_pop_refcount;
struct mlx5_esw_chains_priv *esw_chains_priv;
+ struct {
+ DECLARE_HASHTABLE(table, 8);
+ /* Protects vports.table */
+ struct mutex lock;
+ } vports;
+
} offloads;
};
u32 flags;
};
struct mlx5_esw_offload {
+ struct mlx5_flow_table *ft_offloads_restore;
+ struct mlx5_flow_group *restore_group;
+ struct mlx5_modify_hdr *restore_copy_hdr_id;
+
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps;
@@ -224,6 +237,7 @@ struct mlx5_esw_functions {
enum {
MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
+ MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};
struct mlx5_eswitch {
@@ -255,6 +269,9 @@ struct mlx5_eswitch {
u16 manager_vport;
u16 first_host_vport;
struct mlx5_esw_functions esw_funcs;
+ struct {
+ u32 large_group_num;
+ } params;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -375,6 +392,7 @@ enum {
enum {
MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
+ MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
};
struct mlx5_esw_flow_attr {
@@ -405,6 +423,9 @@ struct mlx5_esw_flow_attr {
u16 prio;
u32 dest_chain;
u32 flags;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_table *dest_ft;
+ struct mlx5_ct_attr ct_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
@@ -414,7 +435,6 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack);
@@ -623,6 +643,14 @@ void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
+int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
+void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);
+
+struct mlx5_flow_handle *
+esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
+u32
+esw_get_max_restore_tag(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -638,6 +666,12 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
+static inline struct mlx5_flow_handle *
+esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1a57b2bd74b8..c36185eb5fbb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -50,6 +50,181 @@
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0
+/* Per vport tables */
+
+#define MLX5_ESW_VPORT_TABLE_SIZE 128
+
+/* This struct is used as the key to the hash table, and it must be packed
+ * so that the hash result is consistent.
+ */
+struct mlx5_vport_key {
+ u32 chain;
+ u16 prio;
+ u16 vport;
+ u16 vhca_id;
+} __packed;
+
+struct mlx5_vport_table {
+ struct hlist_node hlist;
+ struct mlx5_flow_table *fdb;
+ u32 num_rules;
+ struct mlx5_vport_key key;
+};
+
+#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
+
+static struct mlx5_flow_table *
+esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *fdb;
+
+ ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
+ ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
+ ft_attr.prio = FDB_PER_VPORT;
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+		esw_warn(esw->dev, "Failed to create per-vport FDB table, err %ld\n",
+ PTR_ERR(fdb));
+ }
+
+ return fdb;
+}
+
+static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5_vport_key *key)
+{
+ key->vport = attr->in_rep->vport;
+ key->chain = attr->chain;
+ key->prio = attr->prio;
+ key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ return jhash(key, sizeof(*key), 0);
+}
+
+/* caller must hold vports.lock */
+static struct mlx5_vport_table *
+esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
+{
+ struct mlx5_vport_table *e;
+
+ hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
+ if (!memcmp(&e->key, skey, sizeof(*skey)))
+ return e;
+
+ return NULL;
+}
+
+static void
+esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_vport_table *e;
+ struct mlx5_vport_key key;
+ u32 hkey;
+
+ mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ hkey = flow_attr_to_vport_key(esw, attr, &key);
+ e = esw_vport_tbl_lookup(esw, &key, hkey);
+ if (!e || --e->num_rules)
+ goto out;
+
+ hash_del(&e->hlist);
+ mlx5_destroy_flow_table(e->fdb);
+ kfree(e);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+}
+
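+/* Get (or create) the refcounted per-vport FDB table keyed by
+ * {chain, prio, vport, vhca_id}.
+ */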
+static struct mlx5_flow_table *
+esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_vport_table *e;
+ struct mlx5_vport_key skey;
+ u32 hkey;
+
+ mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ hkey = flow_attr_to_vport_key(esw, attr, &skey);
+ e = esw_vport_tbl_lookup(esw, &skey, hkey);
+ if (e) {
+ e->num_rules++;
+ goto out;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e) {
+ fdb = ERR_PTR(-ENOMEM);
+ goto err_alloc;
+ }
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!ns) {
+ esw_warn(dev, "Failed to get FDB namespace\n");
+ fdb = ERR_PTR(-ENOENT);
+ goto err_ns;
+ }
+
+ fdb = esw_vport_tbl_create(esw, ns);
+ if (IS_ERR(fdb))
+ goto err_ns;
+
+ e->fdb = fdb;
+ e->num_rules = 1;
+ e->key = skey;
+ hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+ return e->fdb;
+
+err_ns:
+ kfree(e);
+err_alloc:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+ return fdb;
+}
+
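+/* Pre-create the prio 1 per-vport table for every vport. These tables
+ * replace the level 1 tables for split rules when chains and prios
+ * aren't supported.
+ */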
+int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_flow_attr attr = {};
+ struct mlx5_eswitch_rep rep = {};
+ struct mlx5_flow_table *fdb;
+ struct mlx5_vport *vport;
+ int i;
+
+ attr.prio = 1;
+ attr.in_rep = &rep;
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ attr.in_rep->vport = vport->vport;
+ fdb = esw_vport_tbl_get(esw, &attr);
+ if (IS_ERR(fdb))
+ goto out;
+ }
+ return 0;
+
+out:
+ mlx5_esw_vport_tbl_put(esw);
+ return PTR_ERR(fdb);
+}
+
+void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_flow_attr attr = {};
+ struct mlx5_eswitch_rep rep = {};
+ struct mlx5_vport *vport;
+ int i;
+
+ attr.prio = 1;
+ attr.in_rep = &rep;
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ attr.in_rep->vport = vport->vport;
+ esw_vport_tbl_put(esw, &attr);
+ }
+}
+
+/* End: Per vport tables */
+
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
@@ -85,7 +260,8 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
attr->in_rep->vport));
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
@@ -148,7 +324,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
struct mlx5_flow_table *ft;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ if (attr->dest_ft) {
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = attr->dest_ft;
+ i++;
+ } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
@@ -191,8 +372,6 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
i++;
}
- mlx5_eswitch_set_rule_source_port(esw, spec, attr);
-
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
if (attr->inner_match_level != MLX5_MATCH_NONE)
@@ -201,8 +380,18 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
- fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
- !!split);
+ if (split) {
+ fdb = esw_vport_tbl_get(esw, attr);
+ } else {
+ if (attr->chain || attr->prio)
+ fdb = mlx5_esw_chains_get_table(esw, attr->chain,
+ attr->prio, 0);
+ else
+ fdb = attr->fdb;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+ }
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
@@ -221,7 +410,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
err_add_rule:
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
+ if (split)
+ esw_vport_tbl_put(esw, attr);
+ else if (attr->chain || attr->prio)
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
@@ -247,7 +439,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
goto err_get_fast;
}
- fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
+ fwd_fdb = esw_vport_tbl_get(esw, attr);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
@@ -285,7 +477,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
add_err:
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+ esw_vport_tbl_put(esw, attr);
err_get_fwd:
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
@@ -312,11 +504,14 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule) {
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+ esw_vport_tbl_put(esw, attr);
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
} else {
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
- !!split);
+ if (split)
+ esw_vport_tbl_put(esw, attr);
+ else if (attr->chain || attr->prio)
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
+ 0);
if (attr->dest_chain)
mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
}
@@ -578,14 +773,21 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
+static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
+{
+ return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
+ MLX5_FDB_TO_VPORT_REG_C_1;
+}
+
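+/* Enable/disable passing metadata registers from the FDB to the vport:
+ * reg_c_0 (source port metadata) always, and reg_c_1 as well when
+ * reg_c_1 loopback is supported.
+ */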
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
- u8 fdb_to_vport_reg_c_id;
+ u8 curr, wanted;
int err;
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
+ !mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
@@ -593,22 +795,33 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
if (err)
return err;
- fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.fdb_to_vport_reg_c_id);
+ curr = MLX5_GET(query_esw_vport_context_out, out,
+ esw_vport_context.fdb_to_vport_reg_c_id);
+ wanted = MLX5_FDB_TO_VPORT_REG_C_0;
+ if (mlx5_eswitch_reg_c1_loopback_supported(esw))
+ wanted |= MLX5_FDB_TO_VPORT_REG_C_1;
if (enable)
- fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+ curr |= wanted;
else
- fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
+ curr &= ~wanted;
MLX5_SET(modify_esw_vport_context_in, in,
- esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
+ esw_vport_context.fdb_to_vport_reg_c_id, curr);
MLX5_SET(modify_esw_vport_context_in, in,
field_select.fdb_to_vport_reg_c_id, 1);
- return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
- in, sizeof(in));
+ err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
+ sizeof(in));
+ if (!err) {
+ if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
+ esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
+ else
+ esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
+ }
+
+ return err;
}
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
@@ -621,7 +834,8 @@ static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
} else {
@@ -836,6 +1050,56 @@ out:
return err;
}
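+/* Install a restore rule: match the given chain tag in reg_c_0, set it
+ * as the flow tag, copy reg_c_1 to reg_b via the shared modify header,
+ * and forward to the offloads table.
+ */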
+struct mlx5_flow_handle *
+esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
+{
+ struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
+ struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
+ struct mlx5_flow_context *flow_context;
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_spec *spec;
+ void *misc;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ ESW_CHAIN_TAG_METADATA_MASK);
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;
+
+ flow_context = &spec->flow_context;
+ flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
+ flow_context->flow_tag = tag;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = esw->offloads.ft_offloads;
+
+ flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ kfree(spec);
+
+ if (IS_ERR(flow_rule))
+ esw_warn(esw->dev,
+ "Failed to create restore rule for tag: %d, err(%d)\n",
+ tag, (int)PTR_ERR(flow_rule));
+
+ return flow_rule;
+}
+
+u32
+esw_get_max_restore_tag(struct mlx5_eswitch *esw)
+{
+ return ESW_CHAIN_TAG_METADATA_MASK;
+}
+
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
@@ -851,8 +1115,9 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS_2);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_0);
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
} else {
MLX5_SET(create_flow_group_in, flow_group_in,
match_criteria_enable,
@@ -1057,6 +1322,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
}
ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
+ ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft_offloads)) {
@@ -1134,7 +1400,8 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
} else {
@@ -1160,6 +1427,140 @@ out:
return flow_rule;
}
+
+static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
+{
+ u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+ struct mlx5_core_dev *dev = esw->dev;
+ int vport;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (esw->mode == MLX5_ESWITCH_NONE)
+ return -EOPNOTSUPP;
+
+ switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+ case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+ mlx5_mode = MLX5_INLINE_MODE_NONE;
+ goto out;
+ case MLX5_CAP_INLINE_MODE_L2:
+ mlx5_mode = MLX5_INLINE_MODE_L2;
+ goto out;
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ goto query_vports;
+ }
+
+query_vports:
+ mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
+ mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
+ mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+ if (prev_mlx5_mode != mlx5_mode)
+ return -EINVAL;
+ prev_mlx5_mode = mlx5_mode;
+ }
+
+out:
+ *mode = mlx5_mode;
+ return 0;
+}
+
+static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+
+ mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
+ mlx5_destroy_flow_group(offloads->restore_group);
+ mlx5_destroy_flow_table(offloads->ft_offloads_restore);
+}
+
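+/* Create the restore table: a single flow group matching the chain tag
+ * bits of reg_c_0, plus a shared copy header that moves reg_c_1 into
+ * reg_b so the value is visible to software on receive.
+ */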
+static int esw_create_restore_table(struct mlx5_eswitch *esw)
+{
+ u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_modify_hdr *mod_hdr;
+ void *match_criteria, *misc;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
+ if (!ns) {
+ esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ esw_warn(esw->dev, "Failed to create restore table, err %d\n",
+ err);
+ goto out_free;
+ }
+
+ memset(flow_group_in, 0, inlen);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ misc_parameters_2);
+
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ ESW_CHAIN_TAG_METADATA_MASK);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft_attr.max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create restore flow group, err: %d\n",
+ err);
+ goto err_group;
+ }
+
+ MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
+ MLX5_SET(copy_action_in, modact, src_field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+ MLX5_SET(copy_action_in, modact, dst_field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ mod_hdr = mlx5_modify_header_alloc(esw->dev,
+ MLX5_FLOW_NAMESPACE_KERNEL, 1,
+ modact);
+ if (IS_ERR(mod_hdr)) {
+		err = PTR_ERR(mod_hdr);
+		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
+			 err);
+ goto err_mod_hdr;
+ }
+
+ esw->offloads.ft_offloads_restore = ft;
+ esw->offloads.restore_group = g;
+ esw->offloads.restore_copy_hdr_id = mod_hdr;
+
+ return 0;
+
+err_mod_hdr:
+ mlx5_destroy_flow_group(g);
+err_group:
+ mlx5_destroy_flow_table(ft);
+out_free:
+ kvfree(flow_group_in);
+
+ return err;
+}
+
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
@@ -1604,11 +2005,19 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
static const struct mlx5_flow_spec spec = {};
struct mlx5_flow_act flow_act = {};
int err = 0;
+ u32 key;
+
+ key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
+ key >>= ESW_SOURCE_PORT_METADATA_OFFSET;
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
- MLX5_SET(set_action_in, action, data,
- mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
+ MLX5_SET(set_action_in, action, data, key);
+ MLX5_SET(set_action_in, action, offset,
+ ESW_SOURCE_PORT_METADATA_OFFSET);
+ MLX5_SET(set_action_in, action, length,
+ ESW_SOURCE_PORT_METADATA_BITS);
vport->ingress.offloads.modify_metadata =
mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
@@ -1837,6 +2246,18 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
+static bool
+esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
+{
+ return mlx5_core_mp_enabled(esw->dev);
+}
+
+static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
+{
+ return esw_check_vport_match_metadata_mandatory(esw) &&
+ esw_check_vport_match_metadata_supported(esw);
+}
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
@@ -1875,7 +2296,7 @@ static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
int err;
- if (esw_check_vport_match_metadata_supported(esw))
+ if (esw_use_vport_metadata(esw))
esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
@@ -1911,27 +2332,34 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err)
return err;
- err = esw_create_offloads_fdb_tables(esw, total_vports);
+ err = esw_create_offloads_table(esw, total_vports);
if (err)
- goto create_fdb_err;
+ goto create_offloads_err;
- err = esw_create_offloads_table(esw, total_vports);
+ err = esw_create_restore_table(esw);
+ if (err)
+ goto create_restore_err;
+
+ err = esw_create_offloads_fdb_tables(esw, total_vports);
if (err)
- goto create_ft_err;
+ goto create_fdb_err;
err = esw_create_vport_rx_group(esw, total_vports);
if (err)
goto create_fg_err;
+ mutex_init(&esw->fdb_table.offloads.vports.lock);
+ hash_init(esw->fdb_table.offloads.vports.table);
+
return 0;
create_fg_err:
- esw_destroy_offloads_table(esw);
-
-create_ft_err:
esw_destroy_offloads_fdb_tables(esw);
-
create_fdb_err:
+ esw_destroy_restore_table(esw);
+create_restore_err:
+ esw_destroy_offloads_table(esw);
+create_offloads_err:
esw_destroy_uplink_offloads_acl_tables(esw);
return err;
@@ -1939,9 +2367,11 @@ create_fdb_err:
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
+ mutex_destroy(&esw->fdb_table.offloads.vports.lock);
esw_destroy_vport_rx_group(esw);
- esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
+ esw_destroy_restore_table(esw);
+ esw_destroy_offloads_table(esw);
esw_destroy_uplink_offloads_acl_tables(esw);
}
@@ -2291,43 +2721,6 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
-{
- u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
- struct mlx5_core_dev *dev = esw->dev;
- int vport;
-
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
- return -EOPNOTSUPP;
-
- if (esw->mode == MLX5_ESWITCH_NONE)
- return -EOPNOTSUPP;
-
- switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
- case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
- mlx5_mode = MLX5_INLINE_MODE_NONE;
- goto out;
- case MLX5_CAP_INLINE_MODE_L2:
- mlx5_mode = MLX5_INLINE_MODE_L2;
- goto out;
- case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
- goto query_vports;
- }
-
-query_vports:
- mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
- mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
- mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
- if (prev_mlx5_mode != mlx5_mode)
- return -EINVAL;
- prev_mlx5_mode = mlx5_mode;
- }
-
-out:
- *mode = mlx5_mode;
- return 0;
-}
-
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack)
@@ -2464,15 +2857,53 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
vport_num <= esw->dev->priv.sriov.max_vfs;
}
+bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
+{
+ return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
+}
+EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
+
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
-u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
+u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num)
{
- return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
+ u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
+ u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
+ u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ u32 val;
+
+ /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
+ WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
+
+ /* Trim vhca_id to ESW_VHCA_ID_BITS */
+ vhca_id &= vhca_id_mask;
+
+	/* Make sure the PF and ECPF map to the end of the ESW_VPORT_BITS
+	 * range so that, after trimming, they don't overlap with VF numbers
+	 * or with each other.
+ */
+ WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
+ vport_num_mask - 1);
+ WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
+ vport_num_mask - 1);
+ WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
+ (MLX5_VPORT_ECPF & vport_num_mask));
+
+	/* Make sure the VF vport_num fits ESW_VPORT_BITS and doesn't
+	 * overlap with the PF and ECPF.
+ */
+ if (vport_num != MLX5_VPORT_UPLINK &&
+ vport_num != MLX5_VPORT_ECPF)
+ WARN_ON_ONCE(vport_num >= vport_num_mask - 1);
+
+ /* We can now trim vport_num to ESW_VPORT_BITS */
+ vport_num &= vport_num_mask;
+
+ val = (vhca_id << ESW_VPORT_BITS) | vport_num;
+ return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
index 4276194b633f..0702c216a031 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
@@ -6,14 +6,17 @@
#include <linux/mlx5/fs.h>
#include "eswitch_offloads_chains.h"
+#include "en/mapping.h"
#include "mlx5_core.h"
#include "fs_core.h"
#include "eswitch.h"
#include "en.h"
+#include "en_tc.h"
#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv)
#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock)
#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht)
+#define esw_chains_mapping(esw) (esw_chains_priv(esw)->chains_mapping)
#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht)
#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left)
#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb)
@@ -21,8 +24,6 @@
#define fdb_ignore_flow_level_supported(esw) \
(MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
-#define ESW_OFFLOADS_NUM_GROUPS 4
-
 /* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
* and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
* for each flow table pool. We can allocate up to 16M of each pool,
@@ -36,6 +37,7 @@ static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
1 * 1024 * 1024,
64 * 1024,
128 };
+#define ESW_FT_TBL_SZ (64 * 1024)
struct mlx5_esw_chains_priv {
struct rhashtable chains_ht;
@@ -44,6 +46,7 @@ struct mlx5_esw_chains_priv {
struct mutex lock;
struct mlx5_flow_table *tc_end_fdb;
+ struct mapping_ctx *chains_mapping;
int fdb_left[ARRAY_SIZE(ESW_POOLS)];
};
@@ -54,9 +57,12 @@ struct fdb_chain {
u32 chain;
int ref;
+ int id;
struct mlx5_eswitch *esw;
struct list_head prios_list;
+ struct mlx5_flow_handle *restore_rule;
+ struct mlx5_modify_hdr *miss_modify_hdr;
};
struct fdb_prio_key {
@@ -99,6 +105,11 @@ bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
}
+bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw)
+{
+ return fdb_ignore_flow_level_supported(esw);
+}
+
u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_chains_prios_supported(esw))
@@ -198,7 +209,9 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
+ sz = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
+ mlx5_esw_chains_get_avail_sz_from_pool(esw, ESW_FT_TBL_SZ) :
+ mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
if (!sz)
return ERR_PTR(-ENOSPC);
ft_attr.max_fte = sz;
@@ -234,7 +247,7 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
}
ft_attr.autogroup.num_reserved_entries = 2;
- ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+ ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
esw_warn(esw->dev,
@@ -255,6 +268,70 @@ mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
mlx5_destroy_flow_table(fdb);
}
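+/* Allocate a restore id for this chain, build the modify header that
+ * writes the id into the chain register on a table miss, and install
+ * the matching restore rule.
+ */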
+static int
+create_fdb_chain_restore(struct fdb_chain *fdb_chain)
+{
+	u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
+ struct mlx5_eswitch *esw = fdb_chain->esw;
+ struct mlx5_modify_hdr *mod_hdr;
+ u32 index;
+ int err;
+
+ if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw))
+ return 0;
+
+ err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index);
+ if (err)
+ return err;
+ if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
+		/* We got the special default flow tag id, so we won't be
+		 * able to tell whether a packet was actually marked by the
+		 * restore rule we create. Allocate a replacement id and
+		 * release this one.
+		 *
+		 * This wouldn't be possible if MLX5_FS_DEFAULT_FLOW_TAG
+		 * were 0.
+ */
+ err = mapping_add(esw_chains_mapping(esw),
+ &fdb_chain->chain, &index);
+ mapping_remove(esw_chains_mapping(esw),
+ MLX5_FS_DEFAULT_FLOW_TAG);
+ if (err)
+ return err;
+ }
+
+ fdb_chain->id = index;
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, field,
+ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mfield);
+ MLX5_SET(set_action_in, modact, offset,
+ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].moffset * 8);
+ MLX5_SET(set_action_in, modact, length,
+ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mlen * 8);
+ MLX5_SET(set_action_in, modact, data, fdb_chain->id);
+ mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
+ 1, modact);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ goto err_mod_hdr;
+ }
+ fdb_chain->miss_modify_hdr = mod_hdr;
+
+ fdb_chain->restore_rule = esw_add_restore_rule(esw, fdb_chain->id);
+ if (IS_ERR(fdb_chain->restore_rule)) {
+ err = PTR_ERR(fdb_chain->restore_rule);
+ goto err_rule;
+ }
+
+ return 0;
+
+err_rule:
+ mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
+err_mod_hdr:
+ /* Datapath can't find this mapping, so we can safely remove it */
+ mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
+ return err;
+}
+
static struct fdb_chain *
mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
{
@@ -269,6 +346,10 @@ mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
fdb_chain->chain = chain;
INIT_LIST_HEAD(&fdb_chain->prios_list);
+ err = create_fdb_chain_restore(fdb_chain);
+ if (err)
+ goto err_restore;
+
err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node,
chain_params);
if (err)
@@ -277,6 +358,12 @@ mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
return fdb_chain;
err_insert:
+ if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) {
+ mlx5_del_flow_rules(fdb_chain->restore_rule);
+ mlx5_modify_header_dealloc(esw->dev,
+ fdb_chain->miss_modify_hdr);
+ }
+err_restore:
kvfree(fdb_chain);
return ERR_PTR(err);
}
@@ -288,6 +375,15 @@ mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
chain_params);
+
+ if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) {
+ mlx5_del_flow_rules(fdb_chain->restore_rule);
+ mlx5_modify_header_dealloc(esw->dev,
+ fdb_chain->miss_modify_hdr);
+
+ mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
+ }
+
kvfree(fdb_chain);
}
@@ -310,10 +406,12 @@ mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
}
static struct mlx5_flow_handle *
-mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb,
+mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain,
+ struct mlx5_flow_table *fdb,
struct mlx5_flow_table *next_fdb)
{
static const struct mlx5_flow_spec spec = {};
+ struct mlx5_eswitch *esw = fdb_chain->esw;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act act = {};
@@ -322,6 +420,11 @@ mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb,
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = next_fdb;
+ if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) {
+ act.modify_hdr = fdb_chain->miss_modify_hdr;
+ act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ }
+
return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1);
}
@@ -345,7 +448,8 @@ mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
list_for_each_entry_continue_reverse(pos,
&fdb_chain->prios_list,
list) {
- miss_rules[n] = mlx5_esw_chains_add_miss_rule(pos->fdb,
+ miss_rules[n] = mlx5_esw_chains_add_miss_rule(fdb_chain,
+ pos->fdb,
next_fdb);
if (IS_ERR(miss_rules[n])) {
err = PTR_ERR(miss_rules[n]);
@@ -459,7 +563,7 @@ mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
}
/* Add miss rule to next_fdb */
- miss_rule = mlx5_esw_chains_add_miss_rule(fdb, next_fdb);
+ miss_rule = mlx5_esw_chains_add_miss_rule(fdb_chain, fdb, next_fdb);
if (IS_ERR(miss_rule)) {
err = PTR_ERR(miss_rule);
goto err_miss_rule;
@@ -618,12 +722,43 @@ mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
return tc_end_fdb(esw);
}
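+/* Create a standalone FDB table beyond the managed chain/prio/level
+ * range. This requires the ignore_flow_level capability, since rules
+ * may jump to this table from any level.
+ */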
+struct mlx5_flow_table *
+mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw)
+{
+ int chain, prio, level, err;
+
+ if (!fdb_ignore_flow_level_supported(esw)) {
+ err = -EOPNOTSUPP;
+
+ esw_warn(esw->dev,
+			 "Couldn't create global flow table, ignore_flow_level not supported.\n");
+ goto err_ignore;
+ }
+
+	chain = mlx5_esw_chains_get_chain_range(esw);
+ prio = mlx5_esw_chains_get_prio_range(esw);
+ level = mlx5_esw_chains_get_level_range(esw);
+
+ return mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
+
+err_ignore:
+ return ERR_PTR(err);
+}
+
+void
+mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ft)
+{
+ mlx5_esw_chains_destroy_fdb_table(esw, ft);
+}
+
static int
mlx5_esw_chains_init(struct mlx5_eswitch *esw)
{
struct mlx5_esw_chains_priv *chains_priv;
struct mlx5_core_dev *dev = esw->dev;
u32 max_flow_counter, fdb_max;
+ struct mapping_ctx *mapping;
int err;
chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
@@ -637,7 +772,7 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
esw_debug(dev,
"Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
- max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max);
+ max_flow_counter, esw->params.large_group_num, fdb_max);
mlx5_esw_chains_init_sz_pool(esw);
@@ -660,10 +795,20 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
if (err)
goto init_prios_ht_err;
+ mapping = mapping_create(sizeof(u32), esw_get_max_restore_tag(esw),
+ true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto mapping_err;
+ }
+ esw_chains_mapping(esw) = mapping;
+
mutex_init(&esw_chains_lock(esw));
return 0;
+mapping_err:
+ rhashtable_destroy(&esw_prios_ht(esw));
init_prios_ht_err:
rhashtable_destroy(&esw_chains_ht(esw));
init_chains_ht_err:
@@ -675,6 +820,7 @@ static void
mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw)
{
mutex_destroy(&esw_chains_lock(esw));
+ mapping_destroy(esw_chains_mapping(esw));
rhashtable_destroy(&esw_prios_ht(esw));
rhashtable_destroy(&esw_chains_ht(esw));
@@ -704,12 +850,9 @@ mlx5_esw_chains_open(struct mlx5_eswitch *esw)
/* Open level 1 for split rules now if prios isn't supported */
if (!mlx5_esw_chains_prios_supported(esw)) {
- ft = mlx5_esw_chains_get_table(esw, 0, 1, 1);
-
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
+ err = mlx5_esw_vport_tbl_get(esw);
+ if (err)
goto level_1_err;
- }
}
return 0;
@@ -725,7 +868,7 @@ static void
mlx5_esw_chains_close(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_chains_prios_supported(esw))
- mlx5_esw_chains_put_table(esw, 0, 1, 1);
+ mlx5_esw_vport_tbl_put(esw);
mlx5_esw_chains_put_table(esw, 0, 1, 0);
mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
}
@@ -756,3 +899,30 @@ mlx5_esw_chains_destroy(struct mlx5_eswitch *esw)
mlx5_esw_chains_close(esw);
mlx5_esw_chains_cleanup(esw);
}
+
+int
+mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
+ u32 *chain_mapping)
+{
+ return mapping_add(esw_chains_mapping(esw), &chain, chain_mapping);
+}
+
+int
+mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw, u32 chain_mapping)
+{
+ return mapping_remove(esw_chains_mapping(esw), chain_mapping);
+}
+
+int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag,
+ u32 *chain)
+{
+ int err;
+
+ err = mapping_find(esw_chains_mapping(esw), tag, chain);
+ if (err) {
+ esw_warn(esw->dev, "Can't find chain for tag: %d\n", tag);
+ return -ENOENT;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
index 2e13097fe348..f3b9ae6798f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
@@ -6,6 +6,8 @@
bool
mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
+bool
+mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw);
u32
mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
u32
@@ -23,8 +25,23 @@ mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
struct mlx5_flow_table *
mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
+struct mlx5_flow_table *
+mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw);
+void
+mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ft);
+
+int
+mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
+ u32 *chain_mapping);
+int
+mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw,
+ u32 chain_mapping);
+
int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
-#endif /* __ML5_ESW_CHAINS_H__ */
+int
+mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain);
+#endif /* __ML5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index dc08ed9339ab..f3a925e5ba88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -181,7 +181,7 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
const struct mlx5_flow_spec *spec)
{
- u32 port_mask, port_value;
+ u16 port_mask, port_value;
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
return spec->flow_context.flow_source ==
@@ -191,7 +191,7 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
misc_parameters.source_port);
port_value = MLX5_GET(fte_match_param, spec->match_value,
misc_parameters.source_port);
- return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
+ return (port_mask & port_value) == MLX5_VPORT_UPLINK;
}
bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 4c61d25d2e88..b794888fa3ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -57,7 +57,7 @@ struct mlx5_fpga_ipsec_cmd_context {
struct completion complete;
struct mlx5_fpga_device *dev;
struct list_head list; /* Item in pending_cmds */
- u8 command[0];
+ u8 command[];
};
struct mlx5_fpga_esp_xfrm;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9dc24241dc91..bd0b2e4f3446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -110,9 +110,9 @@
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
-#define OFFLOADS_MAX_FT 1
-#define OFFLOADS_NUM_PRIOS 1
-#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
+#define OFFLOADS_MAX_FT 2
+#define OFFLOADS_NUM_PRIOS 2
+#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
@@ -145,7 +145,7 @@ static struct init_tree_node {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
LAG_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
OFFLOADS_MAX_FT))),
@@ -2700,6 +2700,17 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
goto out_err;
}
+ /* We put this priority last, knowing that nothing will get here
+ * unless explicitly forwarded to. This is possible because the
+	 * slow path tables have catch-all rules and nothing gets past
+	 * those tables.
+ */
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
set_prio_attrs(steering->fdb_root_ns);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index ab69effb056d..f43caefd07a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -470,7 +470,7 @@ struct mlx5_fc_bulk {
u32 base_id;
int bulk_len;
unsigned long *bitmask;
- struct mlx5_fc fcs[0];
+ struct mlx5_fc fcs[];
};
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 90cb50fe17fd..1eef66ee849e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -235,6 +235,9 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
}
const struct ethtool_ops mlx5i_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5i_get_drvinfo,
.get_strings = mlx5i_get_strings,
.get_sset_count = mlx5i_get_sset_count,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index c87962cab921..de7e01a027bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -56,7 +56,7 @@ struct mlx5i_priv {
u32 qkey;
u16 pkey_index;
struct mlx5i_pkey_qpn_ht *qpn_htbl;
- char *mlx5e_priv[0];
+ char *mlx5e_priv[];
};
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
@@ -107,7 +107,7 @@ struct mlx5i_tx_wqe {
struct mlx5_wqe_datagram_seg datagram;
struct mlx5_wqe_eth_pad pad;
struct mlx5_wqe_eth_seg eth;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index 3fc575d1c3ec..dcea87ec5977 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -42,7 +42,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
MLX5_SET(encryption_key_obj, obj, key_type,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS);
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 01c380425f9d..f3b29d9ade1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -101,22 +101,39 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
+ u16 uid)
+{
+ return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
+ entry->uid == uid);
+}
+
/* Finds an entry where we can register the given rate
* If the rate already exists, return the entry where it is registered,
* otherwise return the first available entry.
* If the table is full, return NULL
*/
static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
- struct mlx5_rate_limit *rl)
+ void *rl_in, u16 uid, bool dedicated)
{
struct mlx5_rl_entry *ret_entry = NULL;
bool empty_found = false;
int i;
for (i = 0; i < table->max_size; i++) {
- if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl))
- return &table->rl_entry[i];
- if (!empty_found && !table->rl_entry[i].rl.rate) {
+ if (dedicated) {
+ if (!table->rl_entry[i].refcount)
+ return &table->rl_entry[i];
+ continue;
+ }
+
+ if (table->rl_entry[i].refcount) {
+ if (table->rl_entry[i].dedicated)
+ continue;
+ if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
+ uid))
+ return &table->rl_entry[i];
+ } else if (!empty_found) {
empty_found = true;
ret_entry = &table->rl_entry[i];
}
@@ -126,18 +143,19 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
}
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
- u16 index,
- struct mlx5_rate_limit *rl)
+ struct mlx5_rl_entry *entry, bool set)
{
- u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {};
+ void *pp_context;
+ pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
MLX5_SET(set_pp_rate_limit_in, in, opcode,
MLX5_CMD_OP_SET_PP_RATE_LIMIT);
- MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
- MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate);
- MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz);
- MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz);
+ MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
+ MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
+ if (set)
+ memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -158,23 +176,25 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
}
EXPORT_SYMBOL(mlx5_rl_are_equal);
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
- struct mlx5_rate_limit *rl)
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+ bool dedicated_entry, u16 *index)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry;
int err = 0;
+ u32 rate;
+ rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
mutex_lock(&table->rl_lock);
- if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) {
+ if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
- rl->rate, table->min_rate, table->max_rate);
+ rate, table->min_rate, table->max_rate);
err = -EINVAL;
goto out;
}
- entry = find_rl_entry(table, rl);
+ entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
if (!entry) {
mlx5_core_err(dev, "Max number of %u rates reached\n",
table->max_size);
@@ -185,16 +205,24 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
/* rate already configured */
entry->refcount++;
} else {
+ memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
+ entry->uid = uid;
/* new rate limit */
- err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
+ err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
if (err) {
- mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
- err, rl->rate, rl->max_burst_sz,
- rl->typical_pkt_sz);
+ mlx5_core_err(
+ dev,
+ "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+ err, rate,
+ MLX5_GET(set_pp_rate_limit_context, rl_in,
+ burst_upper_bound),
+ MLX5_GET(set_pp_rate_limit_context, rl_in,
+ typical_packet_size));
goto out;
}
- entry->rl = *rl;
+
entry->refcount = 1;
+ entry->dedicated = dedicated_entry;
}
*index = entry->index;
@@ -202,20 +230,61 @@ out:
mutex_unlock(&table->rl_lock);
return err;
}
+EXPORT_SYMBOL(mlx5_rl_add_rate_raw);
+
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ struct mlx5_rl_entry *entry;
+
+ mutex_lock(&table->rl_lock);
+ entry = &table->rl_entry[index - 1];
+ entry->refcount--;
+ if (!entry->refcount)
+ /* need to remove rate */
+ mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+ mutex_unlock(&table->rl_lock);
+}
+EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
+
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+ struct mlx5_rate_limit *rl)
+{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+ rl->max_burst_sz);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+ rl->typical_pkt_sz);
+
+ return mlx5_rl_add_rate_raw(dev, rl_raw,
+ MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+ MLX5_SHARED_RESOURCE_UID : 0,
+ false, index);
+}
EXPORT_SYMBOL(mlx5_rl_add_rate);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry = NULL;
- struct mlx5_rate_limit reset_rl = {0};
/* 0 is a reserved value for unlimited rate */
if (rl->rate == 0)
return;
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+ rl->max_burst_sz);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+ rl->typical_pkt_sz);
+
mutex_lock(&table->rl_lock);
- entry = find_rl_entry(table, rl);
+ entry = find_rl_entry(table, rl_raw,
+ MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+ MLX5_SHARED_RESOURCE_UID : 0, false);
if (!entry || !entry->refcount) {
mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
@@ -223,11 +292,9 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
}
entry->refcount--;
- if (!entry->refcount) {
+ if (!entry->refcount)
/* need to remove rate */
- mlx5_set_pp_rate_limit_cmd(dev, entry->index, &reset_rl);
- entry->rl = reset_rl;
- }
+ mlx5_set_pp_rate_limit_cmd(dev, entry, false);
out:
mutex_unlock(&table->rl_lock);
@@ -273,14 +340,13 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev)
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
- struct mlx5_rate_limit rl = {0};
int i;
/* Clear all configured rates */
for (i = 0; i < table->max_size; i++)
- if (table->rl_entry[i].rl.rate)
- mlx5_set_pp_rate_limit_cmd(dev, table->rl_entry[i].index,
- &rl);
+ if (table->rl_entry[i].refcount)
+ mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
+ false);
kfree(dev->priv.rl_table.rl_entry);
}
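
/* Note: the reworked table above keys shared entries on the raw
 * set_pp_rate_limit_context bytes plus a UID, and lets callers claim
 * dedicated entries that are never shared. Below is a minimal
 * user-space sketch of that find-or-allocate logic; sizes and types
 * are illustrative, not the driver's. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RAW_SZ 16
#define TABLE_SZ 4

struct rl_entry {
	uint8_t rl_raw[RAW_SZ];
	uint16_t uid;
	int refcount;
	bool dedicated;
};

static struct rl_entry table[TABLE_SZ];

static struct rl_entry *find_rl_entry(const void *rl_in, uint16_t uid,
				      bool dedicated)
{
	struct rl_entry *free_slot = NULL;
	int i;

	for (i = 0; i < TABLE_SZ; i++) {
		if (dedicated) {
			/* Dedicated requests take the first unused slot. */
			if (!table[i].refcount)
				return &table[i];
			continue;
		}
		if (table[i].refcount) {
			/* Shared entries match on raw bytes and uid. */
			if (!table[i].dedicated &&
			    !memcmp(table[i].rl_raw, rl_in, RAW_SZ) &&
			    table[i].uid == uid)
				return &table[i];
		} else if (!free_slot) {
			free_slot = &table[i];
		}
	}
	return free_slot; /* NULL when the table is full */
}

int main(void)
{
	uint8_t ctx[RAW_SZ] = { 1 };
	struct rl_entry *e = find_rl_entry(ctx, 0, false);

	memcpy(e->rl_raw, ctx, RAW_SZ);
	e->refcount = 1;
	/* An identical request now resolves to the same shared slot. */
	printf("shared: %d\n", find_rl_entry(ctx, 0, false) == e);
	return 0;
}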
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 6dec2a550a10..f899da9f8488 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -672,7 +672,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
dest_action = action;
if (!action->dest_tbl.is_fw_tbl) {
if (action->dest_tbl.tbl->dmn != dmn) {
- mlx5dr_dbg(dmn,
+ mlx5dr_err(dmn,
"Destination table belongs to a different domain\n");
goto out_invalid_arg;
}
@@ -703,7 +703,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action->dest_tbl.fw_tbl.rx_icm_addr =
output.sw_owner_icm_root_0;
} else {
- mlx5dr_dbg(dmn,
+ mlx5dr_err(dmn,
"Failed mlx5_cmd_query_flow_table ret: %d\n",
ret);
return ret;
@@ -772,7 +772,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
/* Check action duplication */
if (++action_type_set[action_type] > max_actions_type) {
- mlx5dr_dbg(dmn, "Action type %d supports only max %d time(s)\n",
+ mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n",
action_type, max_actions_type);
goto out_invalid_arg;
}
@@ -781,7 +781,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
if (dr_action_validate_and_get_next_state(action_domain,
action_type,
&state)) {
- mlx5dr_dbg(dmn, "Invalid action sequence provided\n");
+ mlx5dr_err(dmn, "Invalid action sequence provided\n");
return -EOPNOTSUPP;
}
}
@@ -797,7 +797,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
rx_rule && recalc_cs_required && dest_action) {
ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
if (ret) {
- mlx5dr_dbg(dmn,
+ mlx5dr_err(dmn,
"Failed to handle checksum recalculation err %d\n",
ret);
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index a9da961d4d2f..48b6358b6845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -59,7 +59,7 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
if (ret) {
- mlx5dr_dbg(dmn, "Couldn't allocate PD\n");
+ mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
return ret;
}
@@ -192,7 +192,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
ret = dr_domain_query_vports(dmn);
if (ret) {
- mlx5dr_dbg(dmn, "Failed to query vports caps\n");
+ mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
goto free_vports_caps;
}
@@ -213,7 +213,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
int ret;
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
- mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n");
+ mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
return -EOPNOTSUPP;
}
@@ -257,7 +257,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
if (!vport_cap) {
- mlx5dr_dbg(dmn, "Failed to get esw manager vport\n");
+ mlx5dr_err(dmn, "Failed to get esw manager vport\n");
return -ENOENT;
}
@@ -268,7 +268,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
break;
default:
- mlx5dr_dbg(dmn, "Invalid domain\n");
+ mlx5dr_err(dmn, "Invalid domain\n");
ret = -EINVAL;
break;
}
@@ -300,7 +300,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
mutex_init(&dmn->mutex);
if (dr_domain_caps_init(mdev, dmn)) {
- mlx5dr_dbg(dmn, "Failed init domain, no caps\n");
+ mlx5dr_err(dmn, "Failed init domain, no caps\n");
goto free_domain;
}
@@ -348,8 +348,11 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
mutex_lock(&dmn->mutex);
ret = mlx5dr_send_ring_force_drain(dmn);
mutex_unlock(&dmn->mutex);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
+ flags, ret);
return ret;
+ }
}
if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index d7c7467e2d53..30d2d7376f56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -468,7 +468,7 @@ mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
if (err) {
dr_icm_chill_buckets_abort(pool, bucket, buckets);
- mlx5dr_dbg(pool->dmn, "Sync_steering failed\n");
+ mlx5dr_err(pool->dmn, "Sync_steering failed\n");
chunk = NULL;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index c6dbd856df94..a95938874798 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -388,14 +388,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
if (idx == 0) {
- mlx5dr_dbg(dmn, "Cannot generate any valid rules from mask\n");
+ mlx5dr_err(dmn, "Cannot generate any valid rules from mask\n");
return -EINVAL;
}
/* Check that all mask fields were consumed */
for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
if (((u8 *)&mask)[i] != 0) {
- mlx5dr_info(dmn, "Mask contains unsupported parameters\n");
+ mlx5dr_err(dmn, "Mask contains unsupported parameters\n");
return -EOPNOTSUPP;
}
}
@@ -563,7 +563,7 @@ static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);
if (!nic_matcher->ste_builder) {
- mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
+ mlx5dr_err(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
return -EINVAL;
}
@@ -634,13 +634,13 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
int ret;
if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
- mlx5dr_info(dmn, "Invalid match criteria attribute\n");
+ mlx5dr_err(dmn, "Invalid match criteria attribute\n");
return -EINVAL;
}
if (mask) {
if (mask->match_sz > sizeof(struct mlx5dr_match_param)) {
- mlx5dr_info(dmn, "Invalid match size attribute\n");
+ mlx5dr_err(dmn, "Invalid match size attribute\n");
return -EINVAL;
}
mlx5dr_ste_copy_param(matcher->match_criteria,
@@ -671,7 +671,7 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *tbl,
- u16 priority,
+ u32 priority,
u8 match_criteria_enable,
struct mlx5dr_match_parameters *mask)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index e4cff7abb348..cce3ee7a6614 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -826,8 +826,8 @@ again:
ste_location, send_ste_list);
if (!new_htbl) {
mlx5dr_htbl_put(cur_htbl);
- mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n",
- cur_htbl->chunk_size);
+ mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
+ cur_htbl->chunk_size);
} else {
cur_htbl = new_htbl;
}
@@ -877,7 +877,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
if (!value_size ||
(value_size > sizeof(struct mlx5dr_match_param) ||
(value_size % sizeof(u32)))) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
return false;
}
@@ -888,7 +888,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->outer), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
return false;
}
}
@@ -898,7 +898,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
return false;
}
}
@@ -908,7 +908,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->inner), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
return false;
}
}
@@ -918,7 +918,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc2), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
return false;
}
}
@@ -928,7 +928,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc3), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
return false;
}
}
@@ -1221,7 +1221,7 @@ remove_action_members:
dr_rule_remove_action_members(rule);
free_rule:
kfree(rule);
- mlx5dr_info(dmn, "Failed creating rule\n");
+ mlx5dr_err(dmn, "Failed creating rule\n");
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index c7f10d4f8f8d..a93ed3c3b6c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -136,7 +136,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
&dr_qp->wq_ctrl);
if (err) {
- mlx5_core_info(mdev, "Can't create QP WQ\n");
+ mlx5_core_warn(mdev, "Can't create QP WQ\n");
goto err_wq;
}
@@ -651,8 +651,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
/* Init */
ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed modify QP rst2init\n");
return ret;
+ }
/* RTR */
ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
@@ -667,8 +669,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp;
ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
return ret;
+ }
/* RTS */
rts_attr.timeout = 14;
@@ -676,8 +680,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
rts_attr.rnr_retry = 7;
ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed modify QP rtr2rts\n");
return ret;
+ }
return 0;
}
@@ -861,6 +867,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
cq_size = QUEUE_SIZE + 1;
dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
if (!dmn->send_ring->cq) {
+ mlx5dr_err(dmn, "Failed creating CQ\n");
ret = -ENOMEM;
goto free_send_ring;
}
@@ -872,6 +879,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
if (!dmn->send_ring->qp) {
+ mlx5dr_err(dmn, "Failed creating QP\n");
ret = -ENOMEM;
goto clean_cq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index aade62a9ee5c..c0e3a1e7389d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -728,7 +728,7 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
{
if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
- mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n");
+ mlx5dr_err(dmn, "Partial mask source_port is not supported\n");
return -EINVAL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index 14ce2d7dbb66..c2fe48d7b75a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -128,16 +128,20 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn,
DR_CHUNK_SIZE_1,
MLX5DR_STE_LU_TYPE_DONT_CARE,
0);
- if (!nic_tbl->s_anchor)
+ if (!nic_tbl->s_anchor) {
+ mlx5dr_err(dmn, "Failed allocating htbl\n");
return -ENOMEM;
+ }
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_dmn->default_icm_addr;
ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
nic_tbl->s_anchor,
&info, true);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed int and send htbl\n");
goto free_s_anchor;
+ }
mlx5dr_htbl_get(nic_tbl->s_anchor);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index dffe35145d19..3fa739951b34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -705,7 +705,7 @@ struct mlx5dr_matcher {
struct mlx5dr_matcher_rx_tx rx;
struct mlx5dr_matcher_rx_tx tx;
struct list_head matcher_list;
- u16 prio;
+ u32 prio;
struct mlx5dr_match_param mask;
u8 match_criteria;
refcount_t refcount;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index c2027192e21e..d12d3a2d46ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -140,7 +140,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_group *fg)
{
struct mlx5dr_matcher *matcher;
- u16 priority = MLX5_GET(create_flow_group_in, in,
+ u32 priority = MLX5_GET(create_flow_group_in, in,
start_flow_index);
u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
in,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index e1edc9c247b7..e09e4ea1b045 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -59,7 +59,7 @@ u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *table,
- u16 priority,
+ u32 priority,
u8 match_criteria_enable,
struct mlx5dr_match_parameters *mask);
@@ -151,7 +151,7 @@ mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; }
static inline struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *table,
- u16 priority,
+ u32 priority,
u8 match_criteria_enable,
struct mlx5dr_match_parameters *mask) { return NULL; }
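
/* Note: the u16 -> u32 widening across dr_matcher.c, dr_types.h,
 * fs_dr.c and this header matters because the priority is sourced
 * from the 32-bit start_flow_index field, which a u16 would silently
 * truncate. A toy reproduction, assuming nothing beyond standard C
 * integer conversion: */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t start_flow_index = 0x12345;	/* above U16_MAX */
	uint16_t prio16 = start_flow_index;	/* truncates to 0x2345 */
	uint32_t prio32 = start_flow_index;	/* preserved */

	printf("u16: 0x%x, u32: 0x%x\n", prio16, prio32);
	return 0;
}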
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 79057af4fe99..5d9ddf36fb4e 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -496,7 +496,7 @@ mlxfw_mfa2_file_component_tlv_get(const struct mlxfw_mfa2_file *mfa2_file,
struct mlxfw_mfa2_comp_data {
struct mlxfw_mfa2_component comp;
- u8 buff[0];
+ u8 buff[];
};
static const struct mlxfw_mfa2_tlv_component_descriptor *
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
index 33c971190bba..2014a5de5a01 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
@@ -11,7 +11,7 @@ struct mlxfw_mfa2_tlv {
u8 version;
u8 type;
__be16 len;
- u8 data[0];
+ u8 data[];
} __packed;
static inline const struct mlxfw_mfa2_tlv *
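
/* Note: the [0] -> [] conversions here and in mlx5i_tx_wqe above switch
 * to C99 flexible array members, which tell the compiler the trailing
 * array has no declared bound and keep bounds checkers honest. A
 * minimal allocation pattern for such a struct (a generic sketch, not
 * the mlxfw API): */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tlv {
	uint8_t version;
	uint8_t type;
	uint16_t len;
	uint8_t data[];	/* flexible array member, must be last */
};

int main(void)
{
	const char payload[] = "hello";
	/* sizeof(*t) excludes data[], so add the payload length. */
	struct tlv *t = malloc(sizeof(*t) + sizeof(payload));

	if (!t)
		return 1;
	t->len = sizeof(payload);
	memcpy(t->data, payload, sizeof(payload));
	printf("%u bytes: %s\n", (unsigned)t->len, (char *)t->data);
	free(t);
	return 0;
}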
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 673fa2fd995c..51709012593e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1783,6 +1783,8 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
case TC_SETUP_QDISC_TBF:
return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_FIFO:
+ return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 9708156e7871..81801c6fb941 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -139,6 +139,7 @@ struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
+struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -276,8 +277,7 @@ struct mlxsw_sp_port {
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
struct mlxsw_sp_port_vlan *default_vlan;
- struct mlxsw_sp_qdisc *root_qdisc;
- struct mlxsw_sp_qdisc *tclass_qdiscs;
+ struct mlxsw_sp_qdisc_state *qdisc;
unsigned acl_rule_count;
struct mlxsw_sp_acl_block *ing_acl_block;
struct mlxsw_sp_acl_block *eg_acl_block;
@@ -641,7 +641,8 @@ struct mlxsw_sp_acl_rule_info {
struct mlxsw_afa_block *act_block;
u8 action_created:1,
ingress_bind_blocker:1,
- egress_bind_blocker:1;
+ egress_bind_blocker:1,
+ counter_valid:1;
unsigned int counter_index;
};
@@ -867,6 +868,8 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_ets_qopt_offload *p);
int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_tbf_qopt_offload *p);
+int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 36b264798f04..6f8d5005ff36 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -642,8 +642,14 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct netlink_ext_ack *extack)
{
- return mlxsw_afa_block_append_counter(rulei->act_block,
- &rulei->counter_index, extack);
+ int err;
+
+ err = mlxsw_afa_block_append_counter(rulei->act_block,
+ &rulei->counter_index, extack);
+ if (err)
+ return err;
+ rulei->counter_valid = true;
+ return 0;
}
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
@@ -857,16 +863,18 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_rule_info *rulei;
- u64 current_packets;
- u64 current_bytes;
+ u64 current_packets = 0;
+ u64 current_bytes = 0;
int err;
rulei = mlxsw_sp_acl_rule_rulei(rule);
- err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
- &current_packets, &current_bytes);
- if (err)
- return err;
-
+ if (rulei->counter_valid) {
+ err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
+ &current_packets,
+ &current_bytes);
+ if (err)
+ return err;
+ }
*packets = current_packets - rule->last_packets;
*bytes = current_bytes - rule->last_bytes;
*last_use = rule->last_used;
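
/* Note: with counter_valid tracked on the rule info, the stats path
 * only queries the flow counter when one was actually appended;
 * otherwise the deltas stay zero instead of reading a stale counter
 * index. A hedged user-space model of that gating (the counter query
 * below is a stand-in, not the mlxsw API): */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rule {
	bool counter_valid;
	uint64_t last_packets;
	uint64_t last_bytes;
};

static int flow_counter_get(uint64_t *packets, uint64_t *bytes)
{
	*packets = 100;	/* pretend hardware reading */
	*bytes = 6400;
	return 0;
}

static int rule_get_stats(struct rule *r, uint64_t *packets, uint64_t *bytes)
{
	uint64_t cur_packets = 0, cur_bytes = 0;
	int err;

	if (r->counter_valid) {
		err = flow_counter_get(&cur_packets, &cur_bytes);
		if (err)
			return err;
	}
	*packets = cur_packets - r->last_packets;
	*bytes = cur_bytes - r->last_bytes;
	return 0;
}

int main(void)
{
	struct rule r = { .counter_valid = false };
	uint64_t p, b;

	rule_get_stats(&r, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}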
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 0011a71114e3..88aa554415df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -26,11 +26,20 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (!flow_action_has_entries(flow_action))
return 0;
+ if (!flow_action_mixed_hw_stats_types_check(flow_action, extack))
+ return -EOPNOTSUPP;
- /* Count action is inserted first */
- err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
- if (err)
- return err;
+ act = flow_action_first_entry_get(flow_action);
+ if (act->hw_stats_type == FLOW_ACTION_HW_STATS_TYPE_ANY ||
+ act->hw_stats_type == FLOW_ACTION_HW_STATS_TYPE_IMMEDIATE) {
+ /* Count action is inserted first */
+ err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
+ if (err)
+ return err;
+ } else if (act->hw_stats_type != FLOW_ACTION_HW_STATS_TYPE_DISABLED) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
+ return -EOPNOTSUPP;
+ }
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
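
/* Note: the parse path now appends the count action only when the
 * first entry requests "any" or "immediate" HW stats, skips it for
 * "disabled", and rejects anything else. A toy model of that decision;
 * the enum values are illustrative, not the kernel's
 * FLOW_ACTION_HW_STATS_TYPE_* constants: */
#include <stdio.h>

enum hw_stats {
	STATS_IMMEDIATE,
	STATS_DELAYED,
	STATS_DISABLED,
	STATS_ANY,
};

#define EOPNOTSUPP 95

static int parse_actions(enum hw_stats first)
{
	if (first == STATS_ANY || first == STATS_IMMEDIATE)
		return 1;		/* append a counter action */
	if (first != STATS_DISABLED)
		return -EOPNOTSUPP;	/* unsupported stats type */
	return 0;			/* offload without a counter */
}

int main(void)
{
	printf("any=%d disabled=%d delayed=%d\n",
	       parse_actions(STATS_ANY),
	       parse_actions(STATS_DISABLED),
	       parse_actions(STATS_DELAYED));
	return 0;
}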
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 13a054c0ce0f..b9f429ec0db4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -20,14 +20,17 @@ enum mlxsw_sp_qdisc_type {
MLXSW_SP_QDISC_PRIO,
MLXSW_SP_QDISC_ETS,
MLXSW_SP_QDISC_TBF,
+ MLXSW_SP_QDISC_FIFO,
};
+struct mlxsw_sp_qdisc;
+
struct mlxsw_sp_qdisc_ops {
enum mlxsw_sp_qdisc_type type;
int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params);
- int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+ int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
@@ -64,6 +67,25 @@ struct mlxsw_sp_qdisc {
struct mlxsw_sp_qdisc_ops *ops;
};
+struct mlxsw_sp_qdisc_state {
+ struct mlxsw_sp_qdisc root_qdisc;
+ struct mlxsw_sp_qdisc tclass_qdiscs[IEEE_8021QAZ_MAX_TCS];
+
+ /* When a PRIO or ETS are added, the invisible FIFOs in their bands are
+ * created first. When notifications for these FIFOs arrive, it is not
+ * known what qdisc their parent handle refers to. It could be a
+ * newly-created PRIO that will replace the currently-offloaded one, or
+ * it could be e.g. a RED that will be attached below it.
+ *
+ * As the notifications start to arrive, use them to note what the
+ * future parent handle is, and keep track of which child FIFOs were
+ * seen. Then when the parent is known, retroactively offload those
+ * FIFOs.
+ */
+ u32 future_handle;
+ bool future_fifos[IEEE_8021QAZ_MAX_TCS];
+};
+
static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
enum mlxsw_sp_qdisc_type type)
@@ -77,36 +99,38 @@ static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
bool root_only)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int tclass, child_index;
if (parent == TC_H_ROOT)
- return mlxsw_sp_port->root_qdisc;
+ return &qdisc_state->root_qdisc;
- if (root_only || !mlxsw_sp_port->root_qdisc ||
- !mlxsw_sp_port->root_qdisc->ops ||
- TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
+ if (root_only || !qdisc_state ||
+ !qdisc_state->root_qdisc.ops ||
+ TC_H_MAJ(parent) != qdisc_state->root_qdisc.handle ||
TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
return NULL;
child_index = TC_H_MIN(parent);
tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
- return &mlxsw_sp_port->tclass_qdiscs[tclass];
+ return &qdisc_state->tclass_qdiscs[tclass];
}
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int i;
- if (mlxsw_sp_port->root_qdisc->handle == handle)
- return mlxsw_sp_port->root_qdisc;
+ if (qdisc_state->root_qdisc.handle == handle)
+ return &qdisc_state->root_qdisc;
- if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
+ if (qdisc_state->root_qdisc.handle == TC_H_UNSPEC)
return NULL;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
- return &mlxsw_sp_port->tclass_qdiscs[i];
+ if (qdisc_state->tclass_qdiscs[i].handle == handle)
+ return &qdisc_state->tclass_qdiscs[i];
return NULL;
}
@@ -147,11 +171,15 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
if (err)
goto err_bad_param;
- err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
if (err)
goto err_config;
- if (mlxsw_sp_qdisc->handle != handle) {
+ /* Check if the Qdisc changed. That includes a situation where an
+ * invisible Qdisc replaces another one, or is being added for the
+ * first time.
+ */
+ if (mlxsw_sp_qdisc->handle != handle || handle == TC_H_UNSPEC) {
mlxsw_sp_qdisc->ops = ops;
if (ops->clean_stats)
ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
@@ -360,7 +388,8 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
if (root_qdisc != mlxsw_sp_qdisc)
root_qdisc->stats_base.backlog -=
@@ -399,7 +428,7 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
@@ -559,7 +588,8 @@ static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
if (root_qdisc != mlxsw_sp_qdisc)
root_qdisc->stats_base.backlog -=
@@ -646,7 +676,7 @@ mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
@@ -735,8 +765,121 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
+mlxsw_sp_qdisc_fifo_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
+
+ if (root_qdisc != mlxsw_sp_qdisc)
+ root_qdisc->stats_base.backlog -=
+ mlxsw_sp_qdisc->stats_base.backlog;
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ stats_ptr);
+ return 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
+ .type = MLXSW_SP_QDISC_FIFO,
+ .check_params = mlxsw_sp_qdisc_fifo_check_params,
+ .replace = mlxsw_sp_qdisc_fifo_replace,
+ .destroy = mlxsw_sp_qdisc_fifo_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_fifo_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ int tclass, child_index;
+ u32 parent_handle;
+
+ /* Invisible FIFOs are tracked in future_handle and future_fifos. Make
+ * sure that no more than one qdisc is created for a port at a time.
+ * RTNL is a simple proxy for that.
+ */
+ ASSERT_RTNL();
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
+ parent_handle = TC_H_MAJ(p->parent);
+ if (parent_handle != qdisc_state->future_handle) {
+ /* This notification is for a different Qdisc than
+ * the previous one. Wipe the future cache.
+ */
+ memset(qdisc_state->future_fifos, 0,
+ sizeof(qdisc_state->future_fifos));
+ qdisc_state->future_handle = parent_handle;
+ }
+
+ child_index = TC_H_MIN(p->parent);
+ tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
+ if (tclass < IEEE_8021QAZ_MAX_TCS) {
+ if (p->command == TC_FIFO_REPLACE)
+ qdisc_state->future_fifos[tclass] = true;
+ else if (p->command == TC_FIFO_DESTROY)
+ qdisc_state->future_fifos[tclass] = false;
+ }
+ }
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_FIFO_REPLACE) {
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo, NULL);
+ }
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_FIFO))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_FIFO_DESTROY:
+ if (p->handle == mlxsw_sp_qdisc->handle)
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ return 0;
+ case TC_FIFO_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ case TC_FIFO_REPLACE: /* Handled above. */
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
@@ -746,8 +889,8 @@ __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0, false, 0);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &mlxsw_sp_port->tclass_qdiscs[i]);
- mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
+ &qdisc_state->tclass_qdiscs[i]);
+ qdisc_state->tclass_qdiscs[i].prio_bitmap = 0;
}
return 0;
@@ -780,12 +923,13 @@ mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
unsigned int nbands,
const unsigned int *quanta,
const unsigned int *weights,
const u8 *priomap)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
struct mlxsw_sp_qdisc *child_qdisc;
int tclass, i, band, backlog;
u8 old_priomap;
@@ -793,7 +937,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
for (band = 0; band < nbands; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
- child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
old_priomap = child_qdisc->prio_bitmap;
child_qdisc->prio_bitmap = 0;
@@ -822,28 +966,41 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
child_qdisc);
child_qdisc->stats_base.backlog = backlog;
}
+
+ if (handle == qdisc_state->future_handle &&
+ qdisc_state->future_fifos[tclass]) {
+ err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
+ child_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo,
+ NULL);
+ if (err)
+ return err;
+ }
}
for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
- child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
tclass, 0, false, 0);
}
+
+ qdisc_state->future_handle = TC_H_UNSPEC;
+ memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
return 0;
}
static int
-mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_prio_qopt_offload_params *p = params;
unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
- return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
zeroes, zeroes, p->priomap);
}
@@ -875,6 +1032,7 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
struct mlxsw_sp_qdisc *tc_qdisc;
u64 tx_packets = 0;
u64 tx_bytes = 0;
@@ -883,7 +1041,7 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- tc_qdisc = &mlxsw_sp_port->tclass_qdiscs[i];
+ tc_qdisc = &qdisc_state->tclass_qdiscs[i];
mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
&tx_bytes, &tx_packets,
&drops, &backlog);
@@ -941,13 +1099,13 @@ mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_ets_qopt_offload_replace_params *p = params;
- return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
p->quanta, p->weights, p->priomap);
}
@@ -1009,11 +1167,12 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u8 band, u32 child_handle)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
struct mlxsw_sp_qdisc *old_qdisc;
if (band < IEEE_8021QAZ_MAX_TCS &&
- mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == child_handle)
+ qdisc_state->tclass_qdiscs[tclass_num].handle == child_handle)
return 0;
if (!child_handle) {
@@ -1032,7 +1191,7 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
+ &qdisc_state->tclass_qdiscs[tclass_num]);
return -EOPNOTSUPP;
}
@@ -1114,37 +1273,23 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ struct mlxsw_sp_qdisc_state *qdisc_state;
int i;
- mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
- if (!mlxsw_sp_qdisc)
- goto err_root_qdisc_init;
-
- mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
- mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
- mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
+ if (!qdisc_state)
+ return -ENOMEM;
- mlxsw_sp_qdisc = kcalloc(IEEE_8021QAZ_MAX_TCS,
- sizeof(*mlxsw_sp_qdisc),
- GFP_KERNEL);
- if (!mlxsw_sp_qdisc)
- goto err_tclass_qdiscs_init;
-
- mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
+ qdisc_state->root_qdisc.prio_bitmap = 0xff;
+ qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;
+ qdisc_state->tclass_qdiscs[i].tclass_num = i;
+ mlxsw_sp_port->qdisc = qdisc_state;
return 0;
-
-err_tclass_qdiscs_init:
- kfree(mlxsw_sp_port->root_qdisc);
-err_root_qdisc_init:
- return -ENOMEM;
}
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
- kfree(mlxsw_sp_port->tclass_qdiscs);
- kfree(mlxsw_sp_port->root_qdisc);
+ kfree(mlxsw_sp_port->qdisc);
}
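
/* Note: the future_handle/future_fifos comment above describes a
 * replay mechanism: FIFO notifications that precede their PRIO/ETS
 * parent are cached per band, then offloaded retroactively once a
 * parent with a matching handle arrives. A simplified user-space model
 * of that bookkeeping (handles and band counts are illustrative): */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_TCS 8
#define TC_H_UNSPEC 0u

struct qdisc_state {
	uint32_t future_handle;
	bool future_fifos[MAX_TCS];
};

static void note_future_fifo(struct qdisc_state *qs, uint32_t parent,
			     int band, bool replace)
{
	if (parent != qs->future_handle) {
		/* Notification for a different parent: wipe the cache. */
		memset(qs->future_fifos, 0, sizeof(qs->future_fifos));
		qs->future_handle = parent;
	}
	if (band < MAX_TCS)
		qs->future_fifos[band] = replace;
}

static void ets_replace(struct qdisc_state *qs, uint32_t handle, int nbands)
{
	int band;

	for (band = 0; band < nbands; band++)
		if (handle == qs->future_handle && qs->future_fifos[band])
			printf("replay FIFO offload in band %d\n", band);

	qs->future_handle = TC_H_UNSPEC;
	memset(qs->future_fifos, 0, sizeof(qs->future_fifos));
}

int main(void)
{
	struct qdisc_state qs = { .future_handle = TC_H_UNSPEC };

	/* Invisible FIFOs for bands 0 and 2 show up first... */
	note_future_fifo(&qs, 0x10000, 0, true);
	note_future_fifo(&qs, 0x10000, 2, true);
	/* ...then the PRIO replace with the matching handle. */
	ets_replace(&qs, 0x10000, 4);
	return 0;
}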
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 86d543ab1ab9..18e9ffa21cd4 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -442,8 +442,23 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port,
ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA |
mode, DEV_MAC_MODE_CFG);
- if (ocelot->ops->pcs_init)
- ocelot->ops->pcs_init(ocelot, port);
+ /* Disable HDX fast control */
+ ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
+ DEV_PORT_MISC);
+
+ /* SGMII only for now */
+ ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ PCS1G_MODE_CFG);
+ ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
+
+ /* Enable PCS */
+ ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
+
+ /* No aneg on SGMII */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
+
+ /* No loopback */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
/* Enable MAC module */
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
@@ -1398,7 +1413,7 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
* a source for the other ports.
*/
for (p = 0; p < ocelot->num_phys_ports; p++) {
- if (p == ocelot->cpu || (ocelot->bridge_fwd_mask & BIT(p))) {
+ if (ocelot->bridge_fwd_mask & BIT(p)) {
unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p);
for (i = 0; i < ocelot->num_phys_ports; i++) {
@@ -1413,18 +1428,10 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
}
}
- /* Avoid the NPI port from looping back to itself */
- if (p != ocelot->cpu)
- mask |= BIT(ocelot->cpu);
-
ocelot_write_rix(ocelot, mask,
ANA_PGID_PGID, PGID_SRC + p);
} else {
- /* Only the CPU port, this is compatible with link
- * aggregation.
- */
- ocelot_write_rix(ocelot,
- BIT(ocelot->cpu),
+ ocelot_write_rix(ocelot, 0,
ANA_PGID_PGID, PGID_SRC + p);
}
}
@@ -2176,24 +2183,29 @@ static int ocelot_init_timestamp(struct ocelot *ocelot)
return 0;
}
-static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu)
+/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
+ * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
+ */
+static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
int atop_wm;
- ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG);
+ ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
/* Set Pause WM hysteresis
- * 152 = 6 * mtu / OCELOT_BUFFER_CELL_SZ
- * 101 = 4 * mtu / OCELOT_BUFFER_CELL_SZ
+ * 152 = 6 * maxlen / OCELOT_BUFFER_CELL_SZ
+ * 101 = 4 * maxlen / OCELOT_BUFFER_CELL_SZ
*/
ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
SYS_PAUSE_CFG_PAUSE_STOP(101) |
SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port);
/* Tail dropping watermark */
- atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ;
- ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu),
+ atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) /
+ OCELOT_BUFFER_CELL_SZ;
+ ocelot_write_rix(ocelot, ocelot_wm_enc(9 * maxlen),
SYS_ATOP, port);
ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
}
@@ -2222,9 +2234,10 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
DEV_MAC_HDX_CFG);
/* Set Max Length and maximum tags allowed */
- ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN);
+ ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
DEV_MAC_TAGS_CFG);
@@ -2293,42 +2306,62 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
}
EXPORT_SYMBOL(ocelot_probe_port);
-void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction)
+/* Configure and enable the CPU port module, which is a set of queues.
+ * If @npi contains a valid port index, the CPU port module is connected
+ * to the Node Processor Interface (NPI). This is the mode through which
+ * frames can be injected from and extracted to an external CPU,
+ * over Ethernet.
+ */
+void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction)
{
- /* Configure and enable the CPU port. */
+ int cpu = ocelot->num_phys_ports;
+
+ /* The unicast destination PGID for the CPU port module is unused */
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
+ /* Instead set up a multicast destination PGID for traffic copied to
+ * the CPU. Whitelisted MAC addresses like the port netdevice MAC
+ * addresses will be copied to the CPU via this PGID.
+ */
ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
ANA_PORT_PORT_CFG, cpu);
- /* If the CPU port is a physical port, set up the port in Node
- * Processor Interface (NPI) mode. This is the mode through which
- * frames can be injected from and extracted to an external CPU.
- * Only one port can be an NPI at the same time.
- */
- if (cpu < ocelot->num_phys_ports) {
- int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN;
+ if (npi >= 0 && npi < ocelot->num_phys_ports) {
+ int sdu = ETH_DATA_LEN + OCELOT_TAG_LEN;
ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
- QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu),
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(npi),
QSYS_EXT_CPU_CFG);
if (injection == OCELOT_TAG_PREFIX_SHORT)
- mtu += OCELOT_SHORT_PREFIX_LEN;
+ sdu += OCELOT_SHORT_PREFIX_LEN;
else if (injection == OCELOT_TAG_PREFIX_LONG)
- mtu += OCELOT_LONG_PREFIX_LEN;
-
- ocelot_port_set_mtu(ocelot, cpu, mtu);
+ sdu += OCELOT_LONG_PREFIX_LEN;
+
+ ocelot_port_set_maxlen(ocelot, cpu, sdu);
+
+ /* Enable NPI port */
+ ocelot_write_rix(ocelot,
+ QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, npi);
+ /* NPI port Injection/Extraction configuration */
+ ocelot_write_rix(ocelot,
+ SYS_PORT_MODE_INCL_XTR_HDR(extraction) |
+ SYS_PORT_MODE_INCL_INJ_HDR(injection),
+ SYS_PORT_MODE, npi);
}
- /* CPU port Injection/Extraction configuration */
+ /* Enable CPU port module */
ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
QSYS_SWITCH_PORT_MODE_PORT_ENA,
QSYS_SWITCH_PORT_MODE, cpu);
+ /* CPU port Injection/Extraction configuration */
ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) |
SYS_PORT_MODE_INCL_INJ_HDR(injection),
SYS_PORT_MODE, cpu);
@@ -2338,10 +2371,8 @@ void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
-
- ocelot->cpu = cpu;
}
-EXPORT_SYMBOL(ocelot_set_cpu_port);
+EXPORT_SYMBOL(ocelot_configure_cpu);
int ocelot_init(struct ocelot *ocelot)
{
@@ -2493,7 +2524,6 @@ void ocelot_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
- ocelot_ace_deinit();
if (ocelot->ptp_clock)
ptp_clock_unregister(ocelot->ptp_clock);
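
/* Note: the ocelot_port_set_mtu -> ocelot_port_set_maxlen rename makes
 * the layering explicit: callers pass the L2 payload (SDU), the helper
 * adds the Ethernet header and FCS, and the NPI path folds the tag and
 * injection prefix into the SDU first. A small sketch of that
 * arithmetic; the ETH_* values are the standard Ethernet constants,
 * while the OCELOT_* lengths below are illustrative placeholders. */
#include <stdio.h>

#define ETH_DATA_LEN		1500
#define ETH_HLEN		14
#define ETH_FCS_LEN		4
#define OCELOT_TAG_LEN		16	/* illustrative */
#define OCELOT_LONG_PREFIX_LEN	16	/* illustrative */

static int ocelot_maxlen(int sdu)
{
	return sdu + ETH_HLEN + ETH_FCS_LEN;
}

int main(void)
{
	int npi_sdu = ETH_DATA_LEN + OCELOT_TAG_LEN + OCELOT_LONG_PREFIX_LEN;

	printf("front port maxlen: %d\n", ocelot_maxlen(ETH_DATA_LEN));
	printf("NPI port maxlen:   %d\n", ocelot_maxlen(npi_sdu));
	return 0;
}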
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 04372ba72fec..e34ef8380eb3 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -28,16 +28,6 @@
#include "ocelot_tc.h"
#include "ocelot_ptp.h"
-#define PGID_AGGR 64
-#define PGID_SRC 80
-
-/* Reserved PGIDs */
-#define PGID_CPU (PGID_AGGR - 5)
-#define PGID_UC (PGID_AGGR - 4)
-#define PGID_MC (PGID_AGGR - 3)
-#define PGID_MCIPV4 (PGID_AGGR - 2)
-#define PGID_MCIPV6 (PGID_AGGR - 1)
-
#define OCELOT_BUFFER_CELL_SZ 60
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 86fc6e6b46dd..906b54025b17 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -6,60 +6,12 @@
#include <linux/iopoll.h>
#include <linux/proc_fs.h>
+#include <soc/mscc/ocelot_vcap.h>
#include "ocelot_ace.h"
-#include "ocelot_vcap.h"
#include "ocelot_s2.h"
#define OCELOT_POLICER_DISCARD 0x17f
-
-static struct ocelot_acl_block *acl_block;
-
-struct vcap_props {
- const char *name; /* Symbolic name */
- u16 tg_width; /* Type-group width (in bits) */
- u16 sw_count; /* Sub word count */
- u16 entry_count; /* Entry count */
- u16 entry_words; /* Number of entry words */
- u16 entry_width; /* Entry width (in bits) */
- u16 action_count; /* Action count */
- u16 action_words; /* Number of action words */
- u16 action_width; /* Action width (in bits) */
- u16 action_type_width; /* Action type width (in bits) */
- struct {
- u16 width; /* Action type width (in bits) */
- u16 count; /* Action type sub word count */
- } action_table[2];
- u16 counter_words; /* Number of counter words */
- u16 counter_width; /* Counter width (in bits) */
-};
-
#define ENTRY_WIDTH 32
-#define BITS_TO_32BIT(x) (1 + (((x) - 1) / ENTRY_WIDTH))
-
-static const struct vcap_props vcap_is2 = {
- .name = "IS2",
- .tg_width = 2,
- .sw_count = 4,
- .entry_count = VCAP_IS2_CNT,
- .entry_words = BITS_TO_32BIT(VCAP_IS2_ENTRY_WIDTH),
- .entry_width = VCAP_IS2_ENTRY_WIDTH,
- .action_count = (VCAP_IS2_CNT + VCAP_PORT_CNT + 2),
- .action_words = BITS_TO_32BIT(VCAP_IS2_ACTION_WIDTH),
- .action_width = (VCAP_IS2_ACTION_WIDTH),
- .action_type_width = 1,
- .action_table = {
- {
- .width = (IS2_AO_ACL_ID + IS2_AL_ACL_ID),
- .count = 2
- },
- {
- .width = 6,
- .count = 4
- },
- },
- .counter_words = BITS_TO_32BIT(4 * ENTRY_WIDTH),
- .counter_width = ENTRY_WIDTH,
-};
enum vcap_sel {
VCAP_SEL_ENTRY = 0x1,
@@ -95,18 +47,20 @@ struct vcap_data {
u32 tg_mask; /* Current type-group mask */
};
-static u32 vcap_s2_read_update_ctrl(struct ocelot *oc)
+static u32 vcap_s2_read_update_ctrl(struct ocelot *ocelot)
{
- return ocelot_read(oc, S2_CORE_UPDATE_CTRL);
+ return ocelot_read(ocelot, S2_CORE_UPDATE_CTRL);
}
-static void vcap_cmd(struct ocelot *oc, u16 ix, int cmd, int sel)
+static void vcap_cmd(struct ocelot *ocelot, u16 ix, int cmd, int sel)
{
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+
u32 value = (S2_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
S2_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) |
S2_CORE_UPDATE_CTRL_UPDATE_SHOT);
- if ((sel & VCAP_SEL_ENTRY) && ix >= vcap_is2.entry_count)
+ if ((sel & VCAP_SEL_ENTRY) && ix >= vcap_is2->entry_count)
return;
if (!(sel & VCAP_SEL_ENTRY))
@@ -118,83 +72,101 @@ static void vcap_cmd(struct ocelot *oc, u16 ix, int cmd, int sel)
if (!(sel & VCAP_SEL_COUNTER))
value |= S2_CORE_UPDATE_CTRL_UPDATE_CNT_DIS;
- ocelot_write(oc, value, S2_CORE_UPDATE_CTRL);
- readx_poll_timeout(vcap_s2_read_update_ctrl, oc, value,
+ ocelot_write(ocelot, value, S2_CORE_UPDATE_CTRL);
+ readx_poll_timeout(vcap_s2_read_update_ctrl, ocelot, value,
(value & S2_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0,
10, 100000);
}
/* Convert from 0-based row to VCAP entry row and run command */
-static void vcap_row_cmd(struct ocelot *oc, u32 row, int cmd, int sel)
+static void vcap_row_cmd(struct ocelot *ocelot, u32 row, int cmd, int sel)
{
- vcap_cmd(oc, vcap_is2.entry_count - row - 1, cmd, sel);
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+
+ vcap_cmd(ocelot, vcap_is2->entry_count - row - 1, cmd, sel);
}
-static void vcap_entry2cache(struct ocelot *oc, struct vcap_data *data)
+static void vcap_entry2cache(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 entry_words, i;
- for (i = 0; i < vcap_is2.entry_words; i++) {
- ocelot_write_rix(oc, data->entry[i], S2_CACHE_ENTRY_DAT, i);
- ocelot_write_rix(oc, ~data->mask[i], S2_CACHE_MASK_DAT, i);
+ entry_words = DIV_ROUND_UP(vcap_is2->entry_width, ENTRY_WIDTH);
+
+ for (i = 0; i < entry_words; i++) {
+ ocelot_write_rix(ocelot, data->entry[i], S2_CACHE_ENTRY_DAT, i);
+ ocelot_write_rix(ocelot, ~data->mask[i], S2_CACHE_MASK_DAT, i);
}
- ocelot_write(oc, data->tg, S2_CACHE_TG_DAT);
+ ocelot_write(ocelot, data->tg, S2_CACHE_TG_DAT);
}
-static void vcap_cache2entry(struct ocelot *oc, struct vcap_data *data)
+static void vcap_cache2entry(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 entry_words, i;
+
+ entry_words = DIV_ROUND_UP(vcap_is2->entry_width, ENTRY_WIDTH);
- for (i = 0; i < vcap_is2.entry_words; i++) {
- data->entry[i] = ocelot_read_rix(oc, S2_CACHE_ENTRY_DAT, i);
+ for (i = 0; i < entry_words; i++) {
+ data->entry[i] = ocelot_read_rix(ocelot, S2_CACHE_ENTRY_DAT, i);
// Invert mask
- data->mask[i] = ~ocelot_read_rix(oc, S2_CACHE_MASK_DAT, i);
+ data->mask[i] = ~ocelot_read_rix(ocelot, S2_CACHE_MASK_DAT, i);
}
- data->tg = ocelot_read(oc, S2_CACHE_TG_DAT);
+ data->tg = ocelot_read(ocelot, S2_CACHE_TG_DAT);
}
-static void vcap_action2cache(struct ocelot *oc, struct vcap_data *data)
+static void vcap_action2cache(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i, width, mask;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 action_words, i, width, mask;
/* Encode action type */
- width = vcap_is2.action_type_width;
+ width = vcap_is2->action_type_width;
if (width) {
mask = GENMASK(width, 0);
data->action[0] = ((data->action[0] & ~mask) | data->type);
}
- for (i = 0; i < vcap_is2.action_words; i++)
- ocelot_write_rix(oc, data->action[i], S2_CACHE_ACTION_DAT, i);
+ action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH);
+
+ for (i = 0; i < action_words; i++)
+ ocelot_write_rix(ocelot, data->action[i], S2_CACHE_ACTION_DAT,
+ i);
- for (i = 0; i < vcap_is2.counter_words; i++)
- ocelot_write_rix(oc, data->counter[i], S2_CACHE_CNT_DAT, i);
+ for (i = 0; i < vcap_is2->counter_words; i++)
+ ocelot_write_rix(ocelot, data->counter[i], S2_CACHE_CNT_DAT, i);
}
-static void vcap_cache2action(struct ocelot *oc, struct vcap_data *data)
+static void vcap_cache2action(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i, width;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 action_words, i, width;
- for (i = 0; i < vcap_is2.action_words; i++)
- data->action[i] = ocelot_read_rix(oc, S2_CACHE_ACTION_DAT, i);
+ action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH);
- for (i = 0; i < vcap_is2.counter_words; i++)
- data->counter[i] = ocelot_read_rix(oc, S2_CACHE_CNT_DAT, i);
+ for (i = 0; i < action_words; i++)
+ data->action[i] = ocelot_read_rix(ocelot, S2_CACHE_ACTION_DAT,
+ i);
+
+ for (i = 0; i < vcap_is2->counter_words; i++)
+ data->counter[i] = ocelot_read_rix(ocelot, S2_CACHE_CNT_DAT, i);
/* Extract action type */
- width = vcap_is2.action_type_width;
+ width = vcap_is2->action_type_width;
data->type = (width ? (data->action[0] & GENMASK(width, 0)) : 0);
}
/* Calculate offsets for entry */
-static void is2_data_get(struct vcap_data *data, int ix)
+static void is2_data_get(struct ocelot *ocelot, struct vcap_data *data, int ix)
{
- u32 i, col, offset, count, cnt, base, width = vcap_is2.tg_width;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 i, col, offset, count, cnt, base;
+ u32 width = vcap_is2->tg_width;
count = (data->tg_sw == VCAP_TG_HALF ? 2 : 4);
col = (ix % 2);
- cnt = (vcap_is2.sw_count / count);
- base = (vcap_is2.sw_count - col * cnt - cnt);
+ cnt = (vcap_is2->sw_count / count);
+ base = (vcap_is2->sw_count - col * cnt - cnt);
data->tg_value = 0;
data->tg_mask = 0;
for (i = 0; i < cnt; i++) {
@@ -205,13 +177,13 @@ static void is2_data_get(struct vcap_data *data, int ix)
/* Calculate key/action/counter offsets */
col = (count - col - 1);
- data->key_offset = (base * vcap_is2.entry_width) / vcap_is2.sw_count;
- data->counter_offset = (cnt * col * vcap_is2.counter_width);
+ data->key_offset = (base * vcap_is2->entry_width) / vcap_is2->sw_count;
+ data->counter_offset = (cnt * col * vcap_is2->counter_width);
i = data->type;
- width = vcap_is2.action_table[i].width;
- cnt = vcap_is2.action_table[i].count;
+ width = vcap_is2->action_table[i].width;
+ cnt = vcap_is2->action_table[i].count;
data->action_offset =
- (((cnt * col * width) / count) + vcap_is2.action_type_width);
+ (((cnt * col * width) / count) + vcap_is2->action_type_width);
}
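To make the arithmetic above concrete, a sketch of the key offsets it yields for the VSC7514 geometry (sw_count = 4, entry_width = 376, taken from the vsc7514_vcap_props[] table added later in this patch) in the VCAP_TG_HALF case:

#include <stdio.h>

int main(void)
{
	int sw_count = 4, entry_width = 376;
	int count = 2;	/* VCAP_TG_HALF: two subwords per key */
	int ix;

	for (ix = 0; ix < 2; ix++) {
		int col = ix % 2;
		int cnt = sw_count / count;
		int base = sw_count - col * cnt - cnt;

		/* prints: ix 0 -> 188, ix 1 -> 0 */
		printf("ix %d -> key_offset %d\n",
		       ix, base * entry_width / sw_count);
	}
	return 0;
}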
static void vcap_data_set(u32 *data, u32 offset, u32 len, u32 value)
@@ -242,22 +214,39 @@ static u32 vcap_data_get(u32 *data, u32 offset, u32 len)
return value;
}
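The bodies of vcap_data_set()/vcap_data_get() are elided by this hunk. For orientation only, a self-contained sketch of a bit-slice writer over an array of 32-bit words, analogous in spirit (though not necessarily identical) to what the driver does:

#include <stdint.h>
#include <stdio.h>

static void bits_set(uint32_t *data, uint32_t offset, uint32_t len,
		     uint32_t value)
{
	uint32_t i;

	for (i = 0; i < len; i++, offset++) {
		uint32_t word = offset / 32;
		uint32_t bit = offset % 32;

		if (value & (1u << i))
			data[word] |= 1u << bit;
		else
			data[word] &= ~(1u << bit);
	}
}

int main(void)
{
	uint32_t entry[2] = { 0 };

	/* a 4-bit field straddling the first word boundary */
	bits_set(entry, 30, 4, 0xf);
	printf("%08x %08x\n", entry[0], entry[1]);	/* c0000000 00000003 */
	return 0;
}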
-static void vcap_key_set(struct vcap_data *data, u32 offset, u32 width,
- u32 value, u32 mask)
+static void vcap_key_field_set(struct vcap_data *data, u32 offset, u32 width,
+ u32 value, u32 mask)
{
vcap_data_set(data->entry, offset + data->key_offset, width, value);
vcap_data_set(data->mask, offset + data->key_offset, width, mask);
}
-static void vcap_key_bytes_set(struct vcap_data *data, u32 offset, u8 *val,
- u8 *msk, u32 count)
+static void vcap_key_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
+ u32 value, u32 mask)
+{
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 length = ocelot->vcap_is2_keys[field].length;
+
+ vcap_key_field_set(data, offset, length, value, mask);
+}
+
+static void vcap_key_bytes_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
+ u8 *val, u8 *msk)
{
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 count = ocelot->vcap_is2_keys[field].length;
u32 i, j, n = 0, value = 0, mask = 0;
+ WARN_ON(count % 8);
+
/* Data wider than 32 bits is split into chunks of at most 32 bits.
* The 32 LSB of the data are written to the 32 MSB of the TCAM.
*/
- offset += (count * 8);
+ offset += count;
+ count /= 8;
+
for (i = 0; i < count; i++) {
j = (count - i - 1);
value += (val[j] << n);
@@ -265,7 +254,7 @@ static void vcap_key_bytes_set(struct vcap_data *data, u32 offset, u8 *val,
n += 8;
if (n == ENTRY_WIDTH || (i + 1) == count) {
offset -= n;
- vcap_key_set(data, offset, n, value, mask);
+ vcap_key_field_set(data, offset, n, value, mask);
n = 0;
value = 0;
mask = 0;
@@ -273,55 +262,62 @@ static void vcap_key_bytes_set(struct vcap_data *data, u32 offset, u8 *val,
}
}
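A sketch of how the loop above carves a 48-bit field (a MAC address, say) into MSB-first TCAM chunks; it reproduces only the offset bookkeeping from vcap_key_bytes_set(), not the data accumulation:

#include <stdio.h>

int main(void)
{
	unsigned int count = 48 / 8;	/* field length in bytes */
	unsigned int offset = 48;	/* offset already advanced past the field */
	unsigned int i, n = 0;

	for (i = 0; i < count; i++) {
		n += 8;
		if (n == 32 || (i + 1) == count) {
			offset -= n;
			/* prints: 32 bits at 16, then 16 bits at 0 */
			printf("write %u bits at bit offset %u\n", n, offset);
			n = 0;
		}
	}
	return 0;
}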
-static void vcap_key_l4_port_set(struct vcap_data *data, u32 offset,
+static void vcap_key_l4_port_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
struct ocelot_vcap_udp_tcp *port)
{
- vcap_key_set(data, offset, 16, port->value, port->mask);
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 length = ocelot->vcap_is2_keys[field].length;
+
+ WARN_ON(length != 16);
+
+ vcap_key_field_set(data, offset, length, port->value, port->mask);
}
-static void vcap_key_bit_set(struct vcap_data *data, u32 offset,
+static void vcap_key_bit_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
enum ocelot_vcap_bit val)
{
- vcap_key_set(data, offset, 1, val == OCELOT_VCAP_BIT_1 ? 1 : 0,
- val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
-}
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 length = ocelot->vcap_is2_keys[field].length;
+ u32 value = (val == OCELOT_VCAP_BIT_1 ? 1 : 0);
+ u32 msk = (val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
-#define VCAP_KEY_SET(fld, val, msk) \
- vcap_key_set(&data, IS2_HKO_##fld, IS2_HKL_##fld, val, msk)
-#define VCAP_KEY_ANY_SET(fld) \
- vcap_key_set(&data, IS2_HKO_##fld, IS2_HKL_##fld, 0, 0)
-#define VCAP_KEY_BIT_SET(fld, val) vcap_key_bit_set(&data, IS2_HKO_##fld, val)
-#define VCAP_KEY_BYTES_SET(fld, val, msk) \
- vcap_key_bytes_set(&data, IS2_HKO_##fld, val, msk, IS2_HKL_##fld / 8)
+ WARN_ON(length != 1);
-static void vcap_action_set(struct vcap_data *data, u32 offset, u32 width,
- u32 value)
-{
- vcap_data_set(data->action, offset + data->action_offset, width, value);
+ vcap_key_field_set(data, offset, length, value, msk);
}
-#define VCAP_ACT_SET(fld, val) \
- vcap_action_set(data, IS2_AO_##fld, IS2_AL_##fld, val)
+static void vcap_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_action_field field, u32 value)
+{
+ int offset = ocelot->vcap_is2_actions[field].offset;
+ int length = ocelot->vcap_is2_actions[field].length;
-static void is2_action_set(struct vcap_data *data,
+ vcap_data_set(data->action, offset + data->action_offset, length,
+ value);
+}
+
+static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
enum ocelot_ace_action action)
{
switch (action) {
case OCELOT_ACL_ACTION_DROP:
- VCAP_ACT_SET(PORT_MASK, 0x0);
- VCAP_ACT_SET(MASK_MODE, 0x1);
- VCAP_ACT_SET(POLICE_ENA, 0x1);
- VCAP_ACT_SET(POLICE_IDX, OCELOT_POLICER_DISCARD);
- VCAP_ACT_SET(CPU_QU_NUM, 0x0);
- VCAP_ACT_SET(CPU_COPY_ENA, 0x0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX,
+ OCELOT_POLICER_DISCARD);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0);
break;
case OCELOT_ACL_ACTION_TRAP:
- VCAP_ACT_SET(PORT_MASK, 0x0);
- VCAP_ACT_SET(MASK_MODE, 0x1);
- VCAP_ACT_SET(POLICE_ENA, 0x0);
- VCAP_ACT_SET(POLICE_IDX, 0x0);
- VCAP_ACT_SET(CPU_QU_NUM, 0x0);
- VCAP_ACT_SET(CPU_COPY_ENA, 0x1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 1);
break;
}
}
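The point of the refactoring above is that action fields are no longer compile-time offsets but { offset, length } descriptors looked up per SoC. A sketch of that lookup, using the POLICE_IDX entry from the vsc7514_vcap_is2_actions[] table added later in this patch:

#include <stdio.h>

struct vcap_field {
	int offset;
	int length;
};

enum { ACT_POLICE_IDX };

static const struct vcap_field actions[] = {
	[ACT_POLICE_IDX] = { 10, 9 },	/* from vsc7514_vcap_is2_actions */
};

int main(void)
{
	const struct vcap_field *f = &actions[ACT_POLICE_IDX];

	/* prints: POLICE_IDX occupies bits 10..18 */
	printf("POLICE_IDX occupies bits %d..%d\n",
	       f->offset, f->offset + f->length - 1);
	return 0;
}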
@@ -329,6 +325,7 @@ static void is2_action_set(struct vcap_data *data,
static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_ace_rule *ace)
{
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
u32 val, msk, type, type_mask = 0xf, i, count;
struct ocelot_ace_vlan *tag = &ace->vlan;
struct ocelot_vcap_u64 payload;
@@ -344,60 +341,76 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
vcap_cache2action(ocelot, &data);
data.tg_sw = VCAP_TG_HALF;
- is2_data_get(&data, ix);
+ is2_data_get(ocelot, &data, ix);
data.tg = (data.tg & ~data.tg_mask);
if (ace->prio != 0)
data.tg |= data.tg_value;
data.type = IS2_ACTION_TYPE_NORMAL;
- VCAP_KEY_ANY_SET(PAG);
- VCAP_KEY_SET(IGR_PORT_MASK, 0, ~BIT(ace->chip_port));
- VCAP_KEY_BIT_SET(FIRST, OCELOT_VCAP_BIT_1);
- VCAP_KEY_BIT_SET(HOST_MATCH, OCELOT_VCAP_BIT_ANY);
- VCAP_KEY_BIT_SET(L2_MC, ace->dmac_mc);
- VCAP_KEY_BIT_SET(L2_BC, ace->dmac_bc);
- VCAP_KEY_BIT_SET(VLAN_TAGGED, tag->tagged);
- VCAP_KEY_SET(VID, tag->vid.value, tag->vid.mask);
- VCAP_KEY_SET(PCP, tag->pcp.value[0], tag->pcp.mask[0]);
- VCAP_KEY_BIT_SET(DEI, tag->dei);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_PAG, 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
+ ~ace->ingress_port_mask);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_1);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_HOST_MATCH,
+ OCELOT_VCAP_BIT_ANY);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, ace->dmac_mc);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, ace->dmac_bc);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_VID,
+ tag->vid.value, tag->vid.mask);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_PCP,
+ tag->pcp.value[0], tag->pcp.mask[0]);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DEI, tag->dei);
switch (ace->type) {
case OCELOT_ACE_TYPE_ETYPE: {
struct ocelot_ace_frame_etype *etype = &ace->frame.etype;
type = IS2_TYPE_ETYPE;
- VCAP_KEY_BYTES_SET(L2_DMAC, etype->dmac.value,
- etype->dmac.mask);
- VCAP_KEY_BYTES_SET(L2_SMAC, etype->smac.value,
- etype->smac.mask);
- VCAP_KEY_BYTES_SET(MAC_ETYPE_ETYPE, etype->etype.value,
- etype->etype.mask);
- VCAP_KEY_ANY_SET(MAC_ETYPE_L2_PAYLOAD); // Clear unused bits
- vcap_key_bytes_set(&data, IS2_HKO_MAC_ETYPE_L2_PAYLOAD,
- etype->data.value, etype->data.mask, 2);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ etype->dmac.value, etype->dmac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ etype->smac.value, etype->smac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_ETYPE,
+ etype->etype.value, etype->etype.mask);
+ /* Clear unused bits */
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
+ 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
+ 0, 0);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ etype->data.value, etype->data.mask);
break;
}
case OCELOT_ACE_TYPE_LLC: {
struct ocelot_ace_frame_llc *llc = &ace->frame.llc;
type = IS2_TYPE_LLC;
- VCAP_KEY_BYTES_SET(L2_DMAC, llc->dmac.value, llc->dmac.mask);
- VCAP_KEY_BYTES_SET(L2_SMAC, llc->smac.value, llc->smac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ llc->dmac.value, llc->dmac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ llc->smac.value, llc->smac.mask);
for (i = 0; i < 4; i++) {
payload.value[i] = llc->llc.value[i];
payload.mask[i] = llc->llc.mask[i];
}
- VCAP_KEY_BYTES_SET(MAC_LLC_L2_LLC, payload.value, payload.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_LLC_L2_LLC,
+ payload.value, payload.mask);
break;
}
case OCELOT_ACE_TYPE_SNAP: {
struct ocelot_ace_frame_snap *snap = &ace->frame.snap;
type = IS2_TYPE_SNAP;
- VCAP_KEY_BYTES_SET(L2_DMAC, snap->dmac.value, snap->dmac.mask);
- VCAP_KEY_BYTES_SET(L2_SMAC, snap->smac.value, snap->smac.mask);
- VCAP_KEY_BYTES_SET(MAC_SNAP_L2_SNAP,
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ snap->dmac.value, snap->dmac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ snap->smac.value, snap->smac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
ace->frame.snap.snap.value,
ace->frame.snap.snap.mask);
break;
@@ -406,26 +419,42 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_ace_frame_arp *arp = &ace->frame.arp;
type = IS2_TYPE_ARP;
- VCAP_KEY_BYTES_SET(MAC_ARP_L2_SMAC, arp->smac.value,
- arp->smac.mask);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_ADDR_SPACE_OK, arp->ethernet);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_PROTO_SPACE_OK, arp->ip);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_LEN_OK, arp->length);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_TGT_MATCH, arp->dmac_match);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_SENDER_MATCH, arp->smac_match);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_OPCODE_UNKNOWN, arp->unknown);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_SMAC,
+ arp->smac.value, arp->smac.mask);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK,
+ arp->ethernet);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK,
+ arp->ip);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_LEN_OK,
+ arp->length);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_TARGET_MATCH,
+ arp->dmac_match);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_SENDER_MATCH,
+ arp->smac_match);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN,
+ arp->unknown);
/* OPCODE is inverse, bit 0 is reply flag, bit 1 is RARP flag */
val = ((arp->req == OCELOT_VCAP_BIT_0 ? 1 : 0) |
(arp->arp == OCELOT_VCAP_BIT_0 ? 2 : 0));
msk = ((arp->req == OCELOT_VCAP_BIT_ANY ? 0 : 1) |
(arp->arp == OCELOT_VCAP_BIT_ANY ? 0 : 2));
- VCAP_KEY_SET(MAC_ARP_ARP_OPCODE, val, msk);
- vcap_key_bytes_set(&data, IS2_HKO_MAC_ARP_L3_IP4_DIP,
- arp->dip.value.addr, arp->dip.mask.addr, 4);
- vcap_key_bytes_set(&data, IS2_HKO_MAC_ARP_L3_IP4_SIP,
- arp->sip.value.addr, arp->sip.mask.addr, 4);
- VCAP_KEY_ANY_SET(MAC_ARP_DIP_EQ_SIP);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_OPCODE,
+ val, msk);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP,
+ arp->dip.value.addr, arp->dip.mask.addr);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP,
+ arp->sip.value.addr, arp->sip.mask.addr);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
+ 0, 0);
break;
}
case OCELOT_ACE_TYPE_IPV4:
@@ -493,18 +522,23 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
seq_zero = ipv6->seq_zero;
}
- VCAP_KEY_BIT_SET(IP4,
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_IP4,
ipv4 ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
- VCAP_KEY_BIT_SET(L3_FRAGMENT, fragment);
- VCAP_KEY_ANY_SET(L3_FRAG_OFS_GT0);
- VCAP_KEY_BIT_SET(L3_OPTIONS, options);
- VCAP_KEY_BIT_SET(L3_TTL_GT0, ttl);
- VCAP_KEY_BYTES_SET(L3_TOS, ds.value, ds.mask);
- vcap_key_bytes_set(&data, IS2_HKO_L3_IP4_DIP, dip.value.addr,
- dip.mask.addr, 4);
- vcap_key_bytes_set(&data, IS2_HKO_L3_IP4_SIP, sip.value.addr,
- sip.mask.addr, 4);
- VCAP_KEY_BIT_SET(DIP_EQ_SIP, sip_eq_dip);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L3_FRAGMENT,
+ fragment);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L3_FRAG_OFS_GT0, 0, 0);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L3_OPTIONS,
+ options);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_IP4_L3_TTL_GT0,
+ ttl);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_TOS,
+ ds.value, ds.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_IP4_DIP,
+ dip.value.addr, dip.mask.addr);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_IP4_SIP,
+ sip.value.addr, sip.mask.addr);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DIP_EQ_SIP,
+ sip_eq_dip);
val = proto.value[0];
msk = proto.mask[0];
type = IS2_TYPE_IP_UDP_TCP;
@@ -512,25 +546,34 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
/* UDP/TCP protocol match */
tcp = (val == 6 ?
OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_TCP, tcp);
- vcap_key_l4_port_set(&data,
- IS2_HKO_IP4_TCP_UDP_L4_DPORT,
- dport);
- vcap_key_l4_port_set(&data,
- IS2_HKO_IP4_TCP_UDP_L4_SPORT,
- sport);
- VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_RNG);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_SPORT_EQ_DPORT,
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_TCP, tcp);
+ vcap_key_l4_port_set(ocelot, &data,
+ VCAP_IS2_HK_L4_DPORT, dport);
+ vcap_key_l4_port_set(ocelot, &data,
+ VCAP_IS2_HK_L4_SPORT, sport);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_RNG, 0, 0);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_L4_SPORT_EQ_DPORT,
sport_eq_dport);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_SEQUENCE_EQ0, seq_zero);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_FIN, tcp_fin);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_SYN, tcp_syn);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_RST, tcp_rst);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_PSH, tcp_psh);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_ACK, tcp_ack);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_URG, tcp_urg);
- VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_1588_DOM);
- VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_1588_VER);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_L4_SEQUENCE_EQ0,
+ seq_zero);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_FIN,
+ tcp_fin);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_SYN,
+ tcp_syn);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_RST,
+ tcp_rst);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_PSH,
+ tcp_psh);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_ACK,
+ tcp_ack);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_URG,
+ tcp_urg);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_1588_DOM,
+ 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_1588_VER,
+ 0, 0);
} else {
if (msk == 0) {
/* Any IP protocol match */
@@ -543,10 +586,12 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
payload.mask[i] = ip_data->mask[i];
}
}
- VCAP_KEY_BYTES_SET(IP4_OTHER_L3_PROTO, proto.value,
- proto.mask);
- VCAP_KEY_BYTES_SET(IP4_OTHER_L3_PAYLOAD, payload.value,
- payload.mask);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_IP4_L3_PROTO,
+ proto.value, proto.mask);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_L3_PAYLOAD,
+ payload.value, payload.mask);
}
break;
}
@@ -554,19 +599,21 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
default:
type = 0;
type_mask = 0;
- count = (vcap_is2.entry_width / 2);
- for (i = (IS2_HKO_PCP + IS2_HKL_PCP); i < count;
- i += ENTRY_WIDTH) {
- /* Clear entry data */
- vcap_key_set(&data, i, min(32u, count - i), 0, 0);
+ count = vcap_is2->entry_width / 2;
+ /* Iterate over the non-common part of the key and
+ * clear entry data
+ */
+ for (i = ocelot->vcap_is2_keys[VCAP_IS2_HK_L2_DMAC].offset;
+ i < count; i += ENTRY_WIDTH) {
+ vcap_key_field_set(&data, i, min(32u, count - i), 0, 0);
}
break;
}
- VCAP_KEY_SET(TYPE, type, type_mask);
- is2_action_set(&data, ace->action);
- vcap_data_set(data.counter, data.counter_offset, vcap_is2.counter_width,
- ace->stats.pkts);
+ vcap_key_set(ocelot, &data, VCAP_IS2_TYPE, type, type_mask);
+ is2_action_set(ocelot, &data, ace->action);
+ vcap_data_set(data.counter, data.counter_offset,
+ vcap_is2->counter_width, ace->stats.pkts);
/* Write row */
vcap_entry2cache(ocelot, &data);
@@ -574,19 +621,20 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
vcap_row_cmd(ocelot, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
}
-static void is2_entry_get(struct ocelot_ace_rule *rule, int ix)
+static void is2_entry_get(struct ocelot *ocelot, struct ocelot_ace_rule *rule,
+ int ix)
{
- struct ocelot *op = rule->port->ocelot;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
struct vcap_data data;
int row = (ix / 2);
u32 cnt;
- vcap_row_cmd(op, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
- vcap_cache2action(op, &data);
+ vcap_row_cmd(ocelot, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
+ vcap_cache2action(ocelot, &data);
data.tg_sw = VCAP_TG_HALF;
- is2_data_get(&data, ix);
+ is2_data_get(ocelot, &data, ix);
cnt = vcap_data_get(data.counter, data.counter_offset,
- vcap_is2.counter_width);
+ vcap_is2->counter_width);
rule->stats.pkts = cnt;
}
@@ -641,25 +689,27 @@ ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index)
return NULL;
}
-int ocelot_ace_rule_offload_add(struct ocelot_ace_rule *rule)
+int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule)
{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule *ace;
int i, index;
/* Add rule to the linked list */
- ocelot_ace_rule_add(acl_block, rule);
+ ocelot_ace_rule_add(block, rule);
/* Get the index of the inserted rule */
- index = ocelot_ace_rule_get_index_id(acl_block, rule);
+ index = ocelot_ace_rule_get_index_id(block, rule);
/* Move down the rules to make room for the new rule */
- for (i = acl_block->count - 1; i > index; i--) {
- ace = ocelot_ace_rule_get_rule_index(acl_block, i);
- is2_entry_set(rule->port->ocelot, i, ace);
+ for (i = block->count - 1; i > index; i--) {
+ ace = ocelot_ace_rule_get_rule_index(block, i);
+ is2_entry_set(ocelot, i, ace);
}
/* Now insert the new rule */
- is2_entry_set(rule->port->ocelot, index, rule);
+ is2_entry_set(ocelot, index, rule);
return 0;
}
@@ -680,8 +730,10 @@ static void ocelot_ace_rule_del(struct ocelot_acl_block *block,
block->count--;
}
-int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule)
+int ocelot_ace_rule_offload_del(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule)
{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule del_ace;
struct ocelot_ace_rule *ace;
int i, index;
@@ -689,70 +741,54 @@ int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule)
memset(&del_ace, 0, sizeof(del_ace));
/* Get the index of the rule */
- index = ocelot_ace_rule_get_index_id(acl_block, rule);
+ index = ocelot_ace_rule_get_index_id(block, rule);
/* Delete rule */
- ocelot_ace_rule_del(acl_block, rule);
+ ocelot_ace_rule_del(block, rule);
/* Move up all the rules that follow the deleted one */
- for (i = index; i < acl_block->count; i++) {
- ace = ocelot_ace_rule_get_rule_index(acl_block, i);
- is2_entry_set(rule->port->ocelot, i, ace);
+ for (i = index; i < block->count; i++) {
+ ace = ocelot_ace_rule_get_rule_index(block, i);
+ is2_entry_set(ocelot, i, ace);
}
/* Now delete the last rule, because it is duplicated */
- is2_entry_set(rule->port->ocelot, acl_block->count, &del_ace);
+ is2_entry_set(ocelot, block->count, &del_ace);
return 0;
}
-int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule)
+int ocelot_ace_rule_stats_update(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule)
{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule *tmp;
int index;
- index = ocelot_ace_rule_get_index_id(acl_block, rule);
- is2_entry_get(rule, index);
+ index = ocelot_ace_rule_get_index_id(block, rule);
+ is2_entry_get(ocelot, rule, index);
/* After we get the result we need to clear the counters */
- tmp = ocelot_ace_rule_get_rule_index(acl_block, index);
+ tmp = ocelot_ace_rule_get_rule_index(block, index);
tmp->stats.pkts = 0;
- is2_entry_set(rule->port->ocelot, index, tmp);
+ is2_entry_set(ocelot, index, tmp);
return 0;
}
-static struct ocelot_acl_block *ocelot_acl_block_create(struct ocelot *ocelot)
-{
- struct ocelot_acl_block *block;
-
- block = kzalloc(sizeof(*block), GFP_KERNEL);
- if (!block)
- return NULL;
-
- INIT_LIST_HEAD(&block->rules);
- block->count = 0;
- block->ocelot = ocelot;
-
- return block;
-}
-
-static void ocelot_acl_block_destroy(struct ocelot_acl_block *block)
-{
- kfree(block);
-}
-
int ocelot_ace_init(struct ocelot *ocelot)
{
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
struct vcap_data data;
memset(&data, 0, sizeof(data));
+
vcap_entry2cache(ocelot, &data);
- ocelot_write(ocelot, vcap_is2.entry_count, S2_CORE_MV_CFG);
+ ocelot_write(ocelot, vcap_is2->entry_count, S2_CORE_MV_CFG);
vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
vcap_action2cache(ocelot, &data);
- ocelot_write(ocelot, vcap_is2.action_count, S2_CORE_MV_CFG);
+ ocelot_write(ocelot, vcap_is2->action_count, S2_CORE_MV_CFG);
vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE,
VCAP_SEL_ACTION | VCAP_SEL_COUNTER);
@@ -771,12 +807,7 @@ int ocelot_ace_init(struct ocelot *ocelot)
ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE,
OCELOT_POLICER_DISCARD);
- acl_block = ocelot_acl_block_create(ocelot);
+ INIT_LIST_HEAD(&ocelot->acl_block.rules);
return 0;
}
-
-void ocelot_ace_deinit(void)
-{
- ocelot_acl_block_destroy(acl_block);
-}
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
index c08e3e8482e7..b9a5868e3f15 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.h
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -186,14 +186,13 @@ struct ocelot_ace_stats {
struct ocelot_ace_rule {
struct list_head list;
- struct ocelot_port *port;
u16 prio;
u32 id;
enum ocelot_ace_action action;
struct ocelot_ace_stats stats;
- int chip_port;
+ u16 ingress_port_mask;
enum ocelot_vcap_bit dmac_mc;
enum ocelot_vcap_bit dmac_bc;
@@ -211,22 +210,17 @@ struct ocelot_ace_rule {
} frame;
};
-struct ocelot_acl_block {
- struct list_head rules;
- struct ocelot *ocelot;
- int count;
-};
-
-int ocelot_ace_rule_offload_add(struct ocelot_ace_rule *rule);
-int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule);
-int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_offload_del(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_stats_update(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule);
int ocelot_ace_init(struct ocelot *ocelot);
-void ocelot_ace_deinit(void);
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
- struct flow_block_offload *f);
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
- struct flow_block_offload *f);
+int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
+ struct flow_cls_offload *f,
+ bool ingress);
#endif /* _MSCC_OCELOT_ACE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 1135a18019c7..0ac9fbf77a01 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -14,9 +14,14 @@
#include <linux/skbuff.h>
#include <net/switchdev.h>
+#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
+#define VSC7514_VCAP_IS2_CNT 64
+#define VSC7514_VCAP_IS2_ENTRY_WIDTH 376
+#define VSC7514_VCAP_IS2_ACTION_WIDTH 99
+#define VSC7514_VCAP_PORT_CNT 11
static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info)
{
@@ -211,29 +216,6 @@ static const struct of_device_id mscc_ocelot_match[] = {
};
MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
-static void ocelot_port_pcs_init(struct ocelot *ocelot, int port)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
-
- /* Disable HDX fast control */
- ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
- DEV_PORT_MISC);
-
- /* SGMII only for now */
- ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
- PCS1G_MODE_CFG);
- ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
-
- /* Enable PCS */
- ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
-
- /* No aneg on SGMII */
- ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
-
- /* No loopback */
- ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
-}
-
static int ocelot_reset(struct ocelot *ocelot)
{
int retries = 100;
@@ -258,10 +240,132 @@ static int ocelot_reset(struct ocelot *ocelot)
}
static const struct ocelot_ops ocelot_ops = {
- .pcs_init = ocelot_port_pcs_init,
.reset = ocelot_reset,
};
+static const struct vcap_field vsc7514_vcap_is2_keys[] = {
+ /* Common: 46 bits */
+ [VCAP_IS2_TYPE] = { 0, 4},
+ [VCAP_IS2_HK_FIRST] = { 4, 1},
+ [VCAP_IS2_HK_PAG] = { 5, 8},
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12},
+ [VCAP_IS2_HK_RSV2] = { 25, 1},
+ [VCAP_IS2_HK_HOST_MATCH] = { 26, 1},
+ [VCAP_IS2_HK_L2_MC] = { 27, 1},
+ [VCAP_IS2_HK_L2_BC] = { 28, 1},
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1},
+ [VCAP_IS2_HK_VID] = { 30, 12},
+ [VCAP_IS2_HK_DEI] = { 42, 1},
+ [VCAP_IS2_HK_PCP] = { 43, 3},
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 46, 48},
+ [VCAP_IS2_HK_L2_SMAC] = { 94, 48},
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {142, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {158, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {174, 8},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {182, 3},
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {142, 40},
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {142, 40},
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48},
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1},
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1},
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1},
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1},
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = {100, 2},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {102, 32},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {134, 32},
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {166, 1},
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 46, 1},
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1},
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1},
+ [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1},
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1},
+ [VCAP_IS2_HK_L3_TOS] = { 51, 8},
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32},
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32},
+ [VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1},
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = {124, 1},
+ [VCAP_IS2_HK_L4_SPORT] = {125, 16},
+ [VCAP_IS2_HK_L4_DPORT] = {141, 16},
+ [VCAP_IS2_HK_L4_RNG] = {157, 8},
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1},
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1},
+ [VCAP_IS2_HK_L4_URG] = {167, 1},
+ [VCAP_IS2_HK_L4_ACK] = {168, 1},
+ [VCAP_IS2_HK_L4_PSH] = {169, 1},
+ [VCAP_IS2_HK_L4_RST] = {170, 1},
+ [VCAP_IS2_HK_L4_SYN] = {171, 1},
+ [VCAP_IS2_HK_L4_FIN] = {172, 1},
+ [VCAP_IS2_HK_L4_1588_DOM] = {173, 8},
+ [VCAP_IS2_HK_L4_1588_VER] = {181, 4},
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = {124, 8},
+ [VCAP_IS2_HK_L3_PAYLOAD] = {132, 56},
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1},
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128},
+ [VCAP_IS2_HK_IP6_L3_PROTO] = {175, 8},
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = {142, 7},
+ [VCAP_IS2_HK_OAM_VER] = {149, 5},
+ [VCAP_IS2_HK_OAM_OPCODE] = {154, 8},
+ [VCAP_IS2_HK_OAM_FLAGS] = {162, 8},
+ [VCAP_IS2_HK_OAM_MEPID] = {170, 16},
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {186, 1},
+ [VCAP_IS2_HK_OAM_IS_Y1731] = {187, 1},
+};
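The offsets in this table are cumulative bit positions within the half key. A small self-checking sketch that the common section is contiguous and ends at bit 45, which is why every type-specific field starts at offset 46:

#include <assert.h>

struct field {
	int offset;
	int length;
};

int main(void)
{
	/* the common section of vsc7514_vcap_is2_keys[] above */
	const struct field common[] = {
		{ 0, 4 }, { 4, 1 }, { 5, 8 }, { 13, 12 }, { 25, 1 },
		{ 26, 1 }, { 27, 1 }, { 28, 1 }, { 29, 1 }, { 30, 12 },
		{ 42, 1 }, { 43, 3 },
	};
	int i, n = sizeof(common) / sizeof(common[0]);

	for (i = 1; i < n; i++)
		assert(common[i].offset ==
		       common[i - 1].offset + common[i - 1].length);
	assert(common[n - 1].offset + common[n - 1].length == 46);
	return 0;
}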
+
+static const struct vcap_field vsc7514_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 11},
+ [VCAP_IS2_ACT_REW_OP] = { 31, 9},
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1},
+ [VCAP_IS2_ACT_RSV] = { 41, 2},
+ [VCAP_IS2_ACT_ACL_ID] = { 43, 6},
+ [VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
+};
+
+static const struct vcap_props vsc7514_vcap_props[] = {
+ [VCAP_IS2] = {
+ .tg_width = 2,
+ .sw_count = 4,
+ .entry_count = VSC7514_VCAP_IS2_CNT,
+ .entry_width = VSC7514_VCAP_IS2_ENTRY_WIDTH,
+ .action_count = VSC7514_VCAP_IS2_CNT +
+ VSC7514_VCAP_PORT_CNT + 2,
+		.action_width = VSC7514_VCAP_IS2_ACTION_WIDTH,
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 49,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .counter_words = 4,
+ .counter_width = 32,
+ },
+};
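One consequence of this geometry worth spelling out: with half keys (VCAP_TG_HALF) each of the 64 TCAM rows holds two rules, which is where the row = ix / 2 and col = ix % 2 arithmetic in is2_data_get() and is2_entry_get() comes from. A trivial sketch:

#include <stdio.h>

int main(void)
{
	int entry_count = 64;	/* VSC7514_VCAP_IS2_CNT */
	int ix;

	printf("capacity: %d half rules\n", entry_count * 2);
	for (ix = 0; ix < 4; ix++)
		printf("rule %d -> row %d, col %d\n", ix, ix / 2, ix % 2);
	return 0;
}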
+
static int mscc_ocelot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -349,8 +453,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ptp = 1;
}
- ocelot->num_cpu_ports = 1; /* 1 port on the switch, two groups */
-
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
dev_err(&pdev->dev, "no ethernet-ports child node found\n");
@@ -362,9 +464,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
+ ocelot->vcap_is2_keys = vsc7514_vcap_is2_keys;
+ ocelot->vcap_is2_actions = vsc7514_vcap_is2_actions;
+ ocelot->vcap = vsc7514_vcap_props;
+
ocelot_init(ocelot);
- ocelot_set_cpu_port(ocelot, ocelot->num_phys_ports,
- OCELOT_TAG_PREFIX_NONE, OCELOT_TAG_PREFIX_NONE);
+ /* No NPI port */
+ ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_NONE);
for_each_available_child_of_node(ports, portnp) {
struct ocelot_port_private *priv;
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 3d65b99b9734..6d84173373c7 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -8,27 +8,26 @@
#include "ocelot_ace.h"
-struct ocelot_port_block {
- struct ocelot_acl_block *block;
- struct ocelot_port_private *priv;
-};
-
static int ocelot_flower_parse_action(struct flow_cls_offload *f,
- struct ocelot_ace_rule *rule)
+ struct ocelot_ace_rule *ace)
{
const struct flow_action_entry *a;
int i;
- if (f->rule->action.num_entries != 1)
+ if (!flow_offload_has_one_action(&f->rule->action))
+ return -EOPNOTSUPP;
+
+ if (!flow_action_basic_hw_stats_types_check(&f->rule->action,
+ f->common.extack))
return -EOPNOTSUPP;
flow_action_for_each(i, a, &f->rule->action) {
switch (a->id) {
case FLOW_ACTION_DROP:
- rule->action = OCELOT_ACL_ACTION_DROP;
+ ace->action = OCELOT_ACL_ACTION_DROP;
break;
case FLOW_ACTION_TRAP:
- rule->action = OCELOT_ACL_ACTION_TRAP;
+ ace->action = OCELOT_ACL_ACTION_TRAP;
break;
default:
return -EOPNOTSUPP;
@@ -39,7 +38,7 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f,
}
static int ocelot_flower_parse(struct flow_cls_offload *f,
- struct ocelot_ace_rule *ocelot_rule)
+ struct ocelot_ace_rule *ace)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
@@ -84,14 +83,14 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
return -EOPNOTSUPP;
flow_rule_match_eth_addrs(rule, &match);
- ocelot_rule->type = OCELOT_ACE_TYPE_ETYPE;
- ether_addr_copy(ocelot_rule->frame.etype.dmac.value,
+ ace->type = OCELOT_ACE_TYPE_ETYPE;
+ ether_addr_copy(ace->frame.etype.dmac.value,
match.key->dst);
- ether_addr_copy(ocelot_rule->frame.etype.smac.value,
+ ether_addr_copy(ace->frame.etype.smac.value,
match.key->src);
- ether_addr_copy(ocelot_rule->frame.etype.dmac.mask,
+ ether_addr_copy(ace->frame.etype.dmac.mask,
match.mask->dst);
- ether_addr_copy(ocelot_rule->frame.etype.smac.mask,
+ ether_addr_copy(ace->frame.etype.smac.mask,
match.mask->src);
goto finished_key_parsing;
}
@@ -101,17 +100,17 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
flow_rule_match_basic(rule, &match);
if (ntohs(match.key->n_proto) == ETH_P_IP) {
- ocelot_rule->type = OCELOT_ACE_TYPE_IPV4;
- ocelot_rule->frame.ipv4.proto.value[0] =
+ ace->type = OCELOT_ACE_TYPE_IPV4;
+ ace->frame.ipv4.proto.value[0] =
match.key->ip_proto;
- ocelot_rule->frame.ipv4.proto.mask[0] =
+ ace->frame.ipv4.proto.mask[0] =
match.mask->ip_proto;
}
if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
- ocelot_rule->type = OCELOT_ACE_TYPE_IPV6;
- ocelot_rule->frame.ipv6.proto.value[0] =
+ ace->type = OCELOT_ACE_TYPE_IPV6;
+ ace->frame.ipv6.proto.value[0] =
match.key->ip_proto;
- ocelot_rule->frame.ipv6.proto.mask[0] =
+ ace->frame.ipv6.proto.mask[0] =
match.mask->ip_proto;
}
}
@@ -122,16 +121,16 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
u8 *tmp;
flow_rule_match_ipv4_addrs(rule, &match);
- tmp = &ocelot_rule->frame.ipv4.sip.value.addr[0];
+ tmp = &ace->frame.ipv4.sip.value.addr[0];
memcpy(tmp, &match.key->src, 4);
- tmp = &ocelot_rule->frame.ipv4.sip.mask.addr[0];
+ tmp = &ace->frame.ipv4.sip.mask.addr[0];
memcpy(tmp, &match.mask->src, 4);
- tmp = &ocelot_rule->frame.ipv4.dip.value.addr[0];
+ tmp = &ace->frame.ipv4.dip.value.addr[0];
memcpy(tmp, &match.key->dst, 4);
- tmp = &ocelot_rule->frame.ipv4.dip.mask.addr[0];
+ tmp = &ace->frame.ipv4.dip.mask.addr[0];
memcpy(tmp, &match.mask->dst, 4);
}
@@ -144,213 +143,110 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
- ocelot_rule->frame.ipv4.sport.value = ntohs(match.key->src);
- ocelot_rule->frame.ipv4.sport.mask = ntohs(match.mask->src);
- ocelot_rule->frame.ipv4.dport.value = ntohs(match.key->dst);
- ocelot_rule->frame.ipv4.dport.mask = ntohs(match.mask->dst);
+ ace->frame.ipv4.sport.value = ntohs(match.key->src);
+ ace->frame.ipv4.sport.mask = ntohs(match.mask->src);
+ ace->frame.ipv4.dport.value = ntohs(match.key->dst);
+ ace->frame.ipv4.dport.mask = ntohs(match.mask->dst);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
- ocelot_rule->type = OCELOT_ACE_TYPE_ANY;
- ocelot_rule->vlan.vid.value = match.key->vlan_id;
- ocelot_rule->vlan.vid.mask = match.mask->vlan_id;
- ocelot_rule->vlan.pcp.value[0] = match.key->vlan_priority;
- ocelot_rule->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ ace->type = OCELOT_ACE_TYPE_ANY;
+ ace->vlan.vid.value = match.key->vlan_id;
+ ace->vlan.vid.mask = match.mask->vlan_id;
+ ace->vlan.pcp.value[0] = match.key->vlan_priority;
+ ace->vlan.pcp.mask[0] = match.mask->vlan_priority;
}
finished_key_parsing:
- ocelot_rule->prio = f->common.prio;
- ocelot_rule->id = f->cookie;
- return ocelot_flower_parse_action(f, ocelot_rule);
+ ace->prio = f->common.prio;
+ ace->id = f->cookie;
+ return ocelot_flower_parse_action(f, ace);
}
static
-struct ocelot_ace_rule *ocelot_ace_rule_create(struct flow_cls_offload *f,
- struct ocelot_port_block *block)
+struct ocelot_ace_rule *ocelot_ace_rule_create(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f)
{
- struct ocelot_ace_rule *rule;
+ struct ocelot_ace_rule *ace;
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (!rule)
+ ace = kzalloc(sizeof(*ace), GFP_KERNEL);
+ if (!ace)
return NULL;
- rule->port = &block->priv->port;
- rule->chip_port = block->priv->chip_port;
- return rule;
+ ace->ingress_port_mask = BIT(port);
+ return ace;
}
-static int ocelot_flower_replace(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_ace_rule *rule;
+ struct ocelot_ace_rule *ace;
int ret;
- rule = ocelot_ace_rule_create(f, port_block);
- if (!rule)
+ ace = ocelot_ace_rule_create(ocelot, port, f);
+ if (!ace)
return -ENOMEM;
- ret = ocelot_flower_parse(f, rule);
+ ret = ocelot_flower_parse(f, ace);
if (ret) {
- kfree(rule);
+ kfree(ace);
return ret;
}
- ret = ocelot_ace_rule_offload_add(rule);
- if (ret)
- return ret;
-
- port_block->priv->tc.offload_cnt++;
- return 0;
+ return ocelot_ace_rule_offload_add(ocelot, ace);
}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
-static int ocelot_flower_destroy(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_ace_rule rule;
- int ret;
+ struct ocelot_ace_rule ace;
- rule.prio = f->common.prio;
- rule.port = &port_block->priv->port;
- rule.id = f->cookie;
+ ace.prio = f->common.prio;
+ ace.id = f->cookie;
- ret = ocelot_ace_rule_offload_del(&rule);
- if (ret)
- return ret;
-
- port_block->priv->tc.offload_cnt--;
- return 0;
+ return ocelot_ace_rule_offload_del(ocelot, &ace);
}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
-static int ocelot_flower_stats_update(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_ace_rule rule;
+ struct ocelot_ace_rule ace;
int ret;
- rule.prio = f->common.prio;
- rule.port = &port_block->priv->port;
- rule.id = f->cookie;
- ret = ocelot_ace_rule_stats_update(&rule);
+ ace.prio = f->common.prio;
+ ace.id = f->cookie;
+ ret = ocelot_ace_rule_stats_update(ocelot, &ace);
if (ret)
return ret;
- flow_stats_update(&f->stats, 0x0, rule.stats.pkts, 0x0);
+ flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0x0);
return 0;
}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_stats);
-static int ocelot_setup_tc_cls_flower(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
+ struct flow_cls_offload *f,
+ bool ingress)
{
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ if (!ingress)
+ return -EOPNOTSUPP;
+
switch (f->command) {
case FLOW_CLS_REPLACE:
- return ocelot_flower_replace(f, port_block);
+ return ocelot_cls_flower_replace(ocelot, port, f, ingress);
case FLOW_CLS_DESTROY:
- return ocelot_flower_destroy(f, port_block);
+ return ocelot_cls_flower_destroy(ocelot, port, f, ingress);
case FLOW_CLS_STATS:
- return ocelot_flower_stats_update(f, port_block);
+ return ocelot_cls_flower_stats(ocelot, port, f, ingress);
default:
return -EOPNOTSUPP;
}
}
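The three EXPORT_SYMBOL_GPL() entry points above make the flower path reachable from outside this module. A hypothetical external caller (my_port_flower() is an invented name, and the includes are assumed rather than taken from this patch) would dispatch much like ocelot_setup_tc_cls_flower() does:

/* assumed headers: <net/flow_offload.h> plus the ocelot driver headers
 * declaring struct ocelot and the ocelot_cls_flower_*() prototypes
 */
static int my_port_flower(struct ocelot *ocelot, int port,
			  struct flow_cls_offload *f, bool ingress)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return ocelot_cls_flower_replace(ocelot, port, f, ingress);
	case FLOW_CLS_DESTROY:
		return ocelot_cls_flower_destroy(ocelot, port, f, ingress);
	case FLOW_CLS_STATS:
		return ocelot_cls_flower_stats(ocelot, port, f, ingress);
	default:
		return -EOPNOTSUPP;
	}
}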
-
-static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type,
- void *type_data, void *cb_priv)
-{
- struct ocelot_port_block *port_block = cb_priv;
-
- if (!tc_cls_can_offload_and_chain0(port_block->priv->dev, type_data))
- return -EOPNOTSUPP;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return ocelot_setup_tc_cls_flower(type_data, cb_priv);
- case TC_SETUP_CLSMATCHALL:
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static struct ocelot_port_block*
-ocelot_port_block_create(struct ocelot_port_private *priv)
-{
- struct ocelot_port_block *port_block;
-
- port_block = kzalloc(sizeof(*port_block), GFP_KERNEL);
- if (!port_block)
- return NULL;
-
- port_block->priv = priv;
-
- return port_block;
-}
-
-static void ocelot_port_block_destroy(struct ocelot_port_block *block)
-{
- kfree(block);
-}
-
-static void ocelot_tc_block_unbind(void *cb_priv)
-{
- struct ocelot_port_block *port_block = cb_priv;
-
- ocelot_port_block_destroy(port_block);
-}
-
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
- struct flow_block_offload *f)
-{
- struct ocelot_port_block *port_block;
- struct flow_block_cb *block_cb;
- int ret;
-
- if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- return -EOPNOTSUPP;
-
- block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, priv);
- if (!block_cb) {
- port_block = ocelot_port_block_create(priv);
- if (!port_block)
- return -ENOMEM;
-
- block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower,
- priv, port_block,
- ocelot_tc_block_unbind);
- if (IS_ERR(block_cb)) {
- ret = PTR_ERR(block_cb);
- goto err_cb_register;
- }
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, f->driver_block_list);
- } else {
- port_block = flow_block_cb_priv(block_cb);
- }
-
- flow_block_cb_incref(block_cb);
- return 0;
-
-err_cb_register:
- ocelot_port_block_destroy(port_block);
-
- return ret;
-}
-
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
- struct flow_block_offload *f)
-{
- struct flow_block_cb *block_cb;
-
- block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, priv);
- if (!block_cb)
- return;
-
- if (!flow_block_cb_decref(block_cb)) {
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
- }
-}
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index a4f7fbd76507..3ff5ef41eccf 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -20,9 +20,6 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
int port = priv->chip_port;
int err;
- netdev_dbg(priv->dev, "%s: port %u command %d cookie %lu\n",
- __func__, port, f->command, f->cookie);
-
if (!ingress) {
NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
return -EOPNOTSUPP;
@@ -99,17 +96,10 @@ static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSMATCHALL:
- netdev_dbg(priv->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n",
- ingress ? "ingress" : "egress");
-
return ocelot_setup_tc_cls_matchall(priv, type_data, ingress);
case TC_SETUP_CLSFLOWER:
- return 0;
+ return ocelot_setup_tc_cls_flower(priv, type_data, ingress);
default:
- netdev_dbg(priv->dev, "tc_block_cb: type %d %s\n",
- type,
- ingress ? "ingress" : "egress");
-
return -EOPNOTSUPP;
}
}
@@ -137,10 +127,6 @@ static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
{
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
- int err;
-
- netdev_dbg(priv->dev, "tc_block command %d, binder_type %d\n",
- f->command, f->binder_type);
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
cb = ocelot_setup_tc_block_cb_ig;
@@ -162,11 +148,6 @@ static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
- err = ocelot_setup_tc_block_flower_bind(priv, f);
- if (err < 0) {
- flow_block_cb_free(block_cb);
- return err;
- }
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, f->driver_block_list);
return 0;
@@ -175,7 +156,6 @@ static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
if (!block_cb)
return -ENOENT;
- ocelot_setup_tc_block_flower_unbind(priv, f);
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h
deleted file mode 100644
index e22eac1da783..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_vcap.h
+++ /dev/null
@@ -1,403 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
- * Microsemi Ocelot Switch driver
- * Copyright (c) 2019 Microsemi Corporation
- */
-
-#ifndef _OCELOT_VCAP_H_
-#define _OCELOT_VCAP_H_
-
-/* =================================================================
- * VCAP Common
- * =================================================================
- */
-
-/* VCAP Type-Group values */
-#define VCAP_TG_NONE 0 /* Entry is invalid */
-#define VCAP_TG_FULL 1 /* Full entry */
-#define VCAP_TG_HALF 2 /* Half entry */
-#define VCAP_TG_QUARTER 3 /* Quarter entry */
-
-/* =================================================================
- * VCAP IS2
- * =================================================================
- */
-
-#define VCAP_IS2_CNT 64
-#define VCAP_IS2_ENTRY_WIDTH 376
-#define VCAP_IS2_ACTION_WIDTH 99
-#define VCAP_PORT_CNT 11
-
-/* IS2 half key types */
-#define IS2_TYPE_ETYPE 0
-#define IS2_TYPE_LLC 1
-#define IS2_TYPE_SNAP 2
-#define IS2_TYPE_ARP 3
-#define IS2_TYPE_IP_UDP_TCP 4
-#define IS2_TYPE_IP_OTHER 5
-#define IS2_TYPE_IPV6 6
-#define IS2_TYPE_OAM 7
-#define IS2_TYPE_SMAC_SIP6 8
-#define IS2_TYPE_ANY 100 /* Pseudo type */
-
-/* IS2 half key type mask for matching any IP */
-#define IS2_TYPE_MASK_IP_ANY 0xe
-
-/* IS2 action types */
-#define IS2_ACTION_TYPE_NORMAL 0
-#define IS2_ACTION_TYPE_SMAC_SIP 1
-
-/* IS2 MASK_MODE values */
-#define IS2_ACT_MASK_MODE_NONE 0
-#define IS2_ACT_MASK_MODE_FILTER 1
-#define IS2_ACT_MASK_MODE_POLICY 2
-#define IS2_ACT_MASK_MODE_REDIR 3
-
-/* IS2 REW_OP values */
-#define IS2_ACT_REW_OP_NONE 0
-#define IS2_ACT_REW_OP_PTP_ONE 2
-#define IS2_ACT_REW_OP_PTP_TWO 3
-#define IS2_ACT_REW_OP_SPECIAL 8
-#define IS2_ACT_REW_OP_PTP_ORG 9
-#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_1 (IS2_ACT_REW_OP_PTP_ONE | (1 << 3))
-#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_2 (IS2_ACT_REW_OP_PTP_ONE | (2 << 3))
-#define IS2_ACT_REW_OP_PTP_ONE_ADD_DELAY (IS2_ACT_REW_OP_PTP_ONE | (1 << 5))
-#define IS2_ACT_REW_OP_PTP_ONE_ADD_SUB BIT(7)
-
-#define VCAP_PORT_WIDTH 4
-
-/* IS2 quarter key - SMAC_SIP4 */
-#define IS2_QKO_IGR_PORT 0
-#define IS2_QKL_IGR_PORT VCAP_PORT_WIDTH
-#define IS2_QKO_L2_SMAC (IS2_QKO_IGR_PORT + IS2_QKL_IGR_PORT)
-#define IS2_QKL_L2_SMAC 48
-#define IS2_QKO_L3_IP4_SIP (IS2_QKO_L2_SMAC + IS2_QKL_L2_SMAC)
-#define IS2_QKL_L3_IP4_SIP 32
-
-/* IS2 half key - common */
-#define IS2_HKO_TYPE 0
-#define IS2_HKL_TYPE 4
-#define IS2_HKO_FIRST (IS2_HKO_TYPE + IS2_HKL_TYPE)
-#define IS2_HKL_FIRST 1
-#define IS2_HKO_PAG (IS2_HKO_FIRST + IS2_HKL_FIRST)
-#define IS2_HKL_PAG 8
-#define IS2_HKO_IGR_PORT_MASK (IS2_HKO_PAG + IS2_HKL_PAG)
-#define IS2_HKL_IGR_PORT_MASK (VCAP_PORT_CNT + 1)
-#define IS2_HKO_SERVICE_FRM (IS2_HKO_IGR_PORT_MASK + IS2_HKL_IGR_PORT_MASK)
-#define IS2_HKL_SERVICE_FRM 1
-#define IS2_HKO_HOST_MATCH (IS2_HKO_SERVICE_FRM + IS2_HKL_SERVICE_FRM)
-#define IS2_HKL_HOST_MATCH 1
-#define IS2_HKO_L2_MC (IS2_HKO_HOST_MATCH + IS2_HKL_HOST_MATCH)
-#define IS2_HKL_L2_MC 1
-#define IS2_HKO_L2_BC (IS2_HKO_L2_MC + IS2_HKL_L2_MC)
-#define IS2_HKL_L2_BC 1
-#define IS2_HKO_VLAN_TAGGED (IS2_HKO_L2_BC + IS2_HKL_L2_BC)
-#define IS2_HKL_VLAN_TAGGED 1
-#define IS2_HKO_VID (IS2_HKO_VLAN_TAGGED + IS2_HKL_VLAN_TAGGED)
-#define IS2_HKL_VID 12
-#define IS2_HKO_DEI (IS2_HKO_VID + IS2_HKL_VID)
-#define IS2_HKL_DEI 1
-#define IS2_HKO_PCP (IS2_HKO_DEI + IS2_HKL_DEI)
-#define IS2_HKL_PCP 3
-
-/* IS2 half key - MAC_ETYPE/MAC_LLC/MAC_SNAP/OAM common */
-#define IS2_HKO_L2_DMAC (IS2_HKO_PCP + IS2_HKL_PCP)
-#define IS2_HKL_L2_DMAC 48
-#define IS2_HKO_L2_SMAC (IS2_HKO_L2_DMAC + IS2_HKL_L2_DMAC)
-#define IS2_HKL_L2_SMAC 48
-
-/* IS2 half key - MAC_ETYPE */
-#define IS2_HKO_MAC_ETYPE_ETYPE (IS2_HKO_L2_SMAC + IS2_HKL_L2_SMAC)
-#define IS2_HKL_MAC_ETYPE_ETYPE 16
-#define IS2_HKO_MAC_ETYPE_L2_PAYLOAD \
- (IS2_HKO_MAC_ETYPE_ETYPE + IS2_HKL_MAC_ETYPE_ETYPE)
-#define IS2_HKL_MAC_ETYPE_L2_PAYLOAD 27
-
-/* IS2 half key - MAC_LLC */
-#define IS2_HKO_MAC_LLC_L2_LLC IS2_HKO_MAC_ETYPE_ETYPE
-#define IS2_HKL_MAC_LLC_L2_LLC 40
-
-/* IS2 half key - MAC_SNAP */
-#define IS2_HKO_MAC_SNAP_L2_SNAP IS2_HKO_MAC_ETYPE_ETYPE
-#define IS2_HKL_MAC_SNAP_L2_SNAP 40
-
-/* IS2 half key - ARP */
-#define IS2_HKO_MAC_ARP_L2_SMAC IS2_HKO_L2_DMAC
-#define IS2_HKL_MAC_ARP_L2_SMAC 48
-#define IS2_HKO_MAC_ARP_ARP_ADDR_SPACE_OK \
- (IS2_HKO_MAC_ARP_L2_SMAC + IS2_HKL_MAC_ARP_L2_SMAC)
-#define IS2_HKL_MAC_ARP_ARP_ADDR_SPACE_OK 1
-#define IS2_HKO_MAC_ARP_ARP_PROTO_SPACE_OK \
- (IS2_HKO_MAC_ARP_ARP_ADDR_SPACE_OK + IS2_HKL_MAC_ARP_ARP_ADDR_SPACE_OK)
-#define IS2_HKL_MAC_ARP_ARP_PROTO_SPACE_OK 1
-#define IS2_HKO_MAC_ARP_ARP_LEN_OK \
- (IS2_HKO_MAC_ARP_ARP_PROTO_SPACE_OK + \
- IS2_HKL_MAC_ARP_ARP_PROTO_SPACE_OK)
-#define IS2_HKL_MAC_ARP_ARP_LEN_OK 1
-#define IS2_HKO_MAC_ARP_ARP_TGT_MATCH \
- (IS2_HKO_MAC_ARP_ARP_LEN_OK + IS2_HKL_MAC_ARP_ARP_LEN_OK)
-#define IS2_HKL_MAC_ARP_ARP_TGT_MATCH 1
-#define IS2_HKO_MAC_ARP_ARP_SENDER_MATCH \
- (IS2_HKO_MAC_ARP_ARP_TGT_MATCH + IS2_HKL_MAC_ARP_ARP_TGT_MATCH)
-#define IS2_HKL_MAC_ARP_ARP_SENDER_MATCH 1
-#define IS2_HKO_MAC_ARP_ARP_OPCODE_UNKNOWN \
- (IS2_HKO_MAC_ARP_ARP_SENDER_MATCH + IS2_HKL_MAC_ARP_ARP_SENDER_MATCH)
-#define IS2_HKL_MAC_ARP_ARP_OPCODE_UNKNOWN 1
-#define IS2_HKO_MAC_ARP_ARP_OPCODE \
- (IS2_HKO_MAC_ARP_ARP_OPCODE_UNKNOWN + \
- IS2_HKL_MAC_ARP_ARP_OPCODE_UNKNOWN)
-#define IS2_HKL_MAC_ARP_ARP_OPCODE 2
-#define IS2_HKO_MAC_ARP_L3_IP4_DIP \
- (IS2_HKO_MAC_ARP_ARP_OPCODE + IS2_HKL_MAC_ARP_ARP_OPCODE)
-#define IS2_HKL_MAC_ARP_L3_IP4_DIP 32
-#define IS2_HKO_MAC_ARP_L3_IP4_SIP \
- (IS2_HKO_MAC_ARP_L3_IP4_DIP + IS2_HKL_MAC_ARP_L3_IP4_DIP)
-#define IS2_HKL_MAC_ARP_L3_IP4_SIP 32
-#define IS2_HKO_MAC_ARP_DIP_EQ_SIP \
- (IS2_HKO_MAC_ARP_L3_IP4_SIP + IS2_HKL_MAC_ARP_L3_IP4_SIP)
-#define IS2_HKL_MAC_ARP_DIP_EQ_SIP 1
-
-/* IS2 half key - IP4_TCP_UDP/IP4_OTHER common */
-#define IS2_HKO_IP4 IS2_HKO_L2_DMAC
-#define IS2_HKL_IP4 1
-#define IS2_HKO_L3_FRAGMENT (IS2_HKO_IP4 + IS2_HKL_IP4)
-#define IS2_HKL_L3_FRAGMENT 1
-#define IS2_HKO_L3_FRAG_OFS_GT0 (IS2_HKO_L3_FRAGMENT + IS2_HKL_L3_FRAGMENT)
-#define IS2_HKL_L3_FRAG_OFS_GT0 1
-#define IS2_HKO_L3_OPTIONS (IS2_HKO_L3_FRAG_OFS_GT0 + IS2_HKL_L3_FRAG_OFS_GT0)
-#define IS2_HKL_L3_OPTIONS 1
-#define IS2_HKO_L3_TTL_GT0 (IS2_HKO_L3_OPTIONS + IS2_HKL_L3_OPTIONS)
-#define IS2_HKL_L3_TTL_GT0 1
-#define IS2_HKO_L3_TOS (IS2_HKO_L3_TTL_GT0 + IS2_HKL_L3_TTL_GT0)
-#define IS2_HKL_L3_TOS 8
-#define IS2_HKO_L3_IP4_DIP (IS2_HKO_L3_TOS + IS2_HKL_L3_TOS)
-#define IS2_HKL_L3_IP4_DIP 32
-#define IS2_HKO_L3_IP4_SIP (IS2_HKO_L3_IP4_DIP + IS2_HKL_L3_IP4_DIP)
-#define IS2_HKL_L3_IP4_SIP 32
-#define IS2_HKO_DIP_EQ_SIP (IS2_HKO_L3_IP4_SIP + IS2_HKL_L3_IP4_SIP)
-#define IS2_HKL_DIP_EQ_SIP 1
-
-/* IS2 half key - IP4_TCP_UDP */
-#define IS2_HKO_IP4_TCP_UDP_TCP (IS2_HKO_DIP_EQ_SIP + IS2_HKL_DIP_EQ_SIP)
-#define IS2_HKL_IP4_TCP_UDP_TCP 1
-#define IS2_HKO_IP4_TCP_UDP_L4_DPORT \
- (IS2_HKO_IP4_TCP_UDP_TCP + IS2_HKL_IP4_TCP_UDP_TCP)
-#define IS2_HKL_IP4_TCP_UDP_L4_DPORT 16
-#define IS2_HKO_IP4_TCP_UDP_L4_SPORT \
- (IS2_HKO_IP4_TCP_UDP_L4_DPORT + IS2_HKL_IP4_TCP_UDP_L4_DPORT)
-#define IS2_HKL_IP4_TCP_UDP_L4_SPORT 16
-#define IS2_HKO_IP4_TCP_UDP_L4_RNG \
- (IS2_HKO_IP4_TCP_UDP_L4_SPORT + IS2_HKL_IP4_TCP_UDP_L4_SPORT)
-#define IS2_HKL_IP4_TCP_UDP_L4_RNG 8
-#define IS2_HKO_IP4_TCP_UDP_SPORT_EQ_DPORT \
- (IS2_HKO_IP4_TCP_UDP_L4_RNG + IS2_HKL_IP4_TCP_UDP_L4_RNG)
-#define IS2_HKL_IP4_TCP_UDP_SPORT_EQ_DPORT 1
-#define IS2_HKO_IP4_TCP_UDP_SEQUENCE_EQ0 \
- (IS2_HKO_IP4_TCP_UDP_SPORT_EQ_DPORT + \
- IS2_HKL_IP4_TCP_UDP_SPORT_EQ_DPORT)
-#define IS2_HKL_IP4_TCP_UDP_SEQUENCE_EQ0 1
-#define IS2_HKO_IP4_TCP_UDP_L4_FIN \
- (IS2_HKO_IP4_TCP_UDP_SEQUENCE_EQ0 + IS2_HKL_IP4_TCP_UDP_SEQUENCE_EQ0)
-#define IS2_HKL_IP4_TCP_UDP_L4_FIN 1
-#define IS2_HKO_IP4_TCP_UDP_L4_SYN \
- (IS2_HKO_IP4_TCP_UDP_L4_FIN + IS2_HKL_IP4_TCP_UDP_L4_FIN)
-#define IS2_HKL_IP4_TCP_UDP_L4_SYN 1
-#define IS2_HKO_IP4_TCP_UDP_L4_RST \
- (IS2_HKO_IP4_TCP_UDP_L4_SYN + IS2_HKL_IP4_TCP_UDP_L4_SYN)
-#define IS2_HKL_IP4_TCP_UDP_L4_RST 1
-#define IS2_HKO_IP4_TCP_UDP_L4_PSH \
- (IS2_HKO_IP4_TCP_UDP_L4_RST + IS2_HKL_IP4_TCP_UDP_L4_RST)
-#define IS2_HKL_IP4_TCP_UDP_L4_PSH 1
-#define IS2_HKO_IP4_TCP_UDP_L4_ACK \
- (IS2_HKO_IP4_TCP_UDP_L4_PSH + IS2_HKL_IP4_TCP_UDP_L4_PSH)
-#define IS2_HKL_IP4_TCP_UDP_L4_ACK 1
-#define IS2_HKO_IP4_TCP_UDP_L4_URG \
- (IS2_HKO_IP4_TCP_UDP_L4_ACK + IS2_HKL_IP4_TCP_UDP_L4_ACK)
-#define IS2_HKL_IP4_TCP_UDP_L4_URG 1
-#define IS2_HKO_IP4_TCP_UDP_L4_1588_DOM \
- (IS2_HKO_IP4_TCP_UDP_L4_URG + IS2_HKL_IP4_TCP_UDP_L4_URG)
-#define IS2_HKL_IP4_TCP_UDP_L4_1588_DOM 8
-#define IS2_HKO_IP4_TCP_UDP_L4_1588_VER \
- (IS2_HKO_IP4_TCP_UDP_L4_1588_DOM + IS2_HKL_IP4_TCP_UDP_L4_1588_DOM)
-#define IS2_HKL_IP4_TCP_UDP_L4_1588_VER 4
-
-/* IS2 half key - IP4_OTHER */
-#define IS2_HKO_IP4_OTHER_L3_PROTO IS2_HKO_IP4_TCP_UDP_TCP
-#define IS2_HKL_IP4_OTHER_L3_PROTO 8
-#define IS2_HKO_IP4_OTHER_L3_PAYLOAD \
- (IS2_HKO_IP4_OTHER_L3_PROTO + IS2_HKL_IP4_OTHER_L3_PROTO)
-#define IS2_HKL_IP4_OTHER_L3_PAYLOAD 56
-
-/* IS2 half key - IP6_STD */
-#define IS2_HKO_IP6_STD_L3_TTL_GT0 IS2_HKO_L2_DMAC
-#define IS2_HKL_IP6_STD_L3_TTL_GT0 1
-#define IS2_HKO_IP6_STD_L3_IP6_SIP \
- (IS2_HKO_IP6_STD_L3_TTL_GT0 + IS2_HKL_IP6_STD_L3_TTL_GT0)
-#define IS2_HKL_IP6_STD_L3_IP6_SIP 128
-#define IS2_HKO_IP6_STD_L3_PROTO \
- (IS2_HKO_IP6_STD_L3_IP6_SIP + IS2_HKL_IP6_STD_L3_IP6_SIP)
-#define IS2_HKL_IP6_STD_L3_PROTO 8
-
-/* IS2 half key - OAM */
-#define IS2_HKO_OAM_OAM_MEL_FLAGS IS2_HKO_MAC_ETYPE_ETYPE
-#define IS2_HKL_OAM_OAM_MEL_FLAGS 7
-#define IS2_HKO_OAM_OAM_VER \
- (IS2_HKO_OAM_OAM_MEL_FLAGS + IS2_HKL_OAM_OAM_MEL_FLAGS)
-#define IS2_HKL_OAM_OAM_VER 5
-#define IS2_HKO_OAM_OAM_OPCODE (IS2_HKO_OAM_OAM_VER + IS2_HKL_OAM_OAM_VER)
-#define IS2_HKL_OAM_OAM_OPCODE 8
-#define IS2_HKO_OAM_OAM_FLAGS (IS2_HKO_OAM_OAM_OPCODE + IS2_HKL_OAM_OAM_OPCODE)
-#define IS2_HKL_OAM_OAM_FLAGS 8
-#define IS2_HKO_OAM_OAM_MEPID (IS2_HKO_OAM_OAM_FLAGS + IS2_HKL_OAM_OAM_FLAGS)
-#define IS2_HKL_OAM_OAM_MEPID 16
-#define IS2_HKO_OAM_OAM_CCM_CNTS_EQ0 \
- (IS2_HKO_OAM_OAM_MEPID + IS2_HKL_OAM_OAM_MEPID)
-#define IS2_HKL_OAM_OAM_CCM_CNTS_EQ0 1
-
-/* IS2 half key - SMAC_SIP6 */
-#define IS2_HKO_SMAC_SIP6_IGR_PORT IS2_HKL_TYPE
-#define IS2_HKL_SMAC_SIP6_IGR_PORT VCAP_PORT_WIDTH
-#define IS2_HKO_SMAC_SIP6_L2_SMAC \
- (IS2_HKO_SMAC_SIP6_IGR_PORT + IS2_HKL_SMAC_SIP6_IGR_PORT)
-#define IS2_HKL_SMAC_SIP6_L2_SMAC 48
-#define IS2_HKO_SMAC_SIP6_L3_IP6_SIP \
- (IS2_HKO_SMAC_SIP6_L2_SMAC + IS2_HKL_SMAC_SIP6_L2_SMAC)
-#define IS2_HKL_SMAC_SIP6_L3_IP6_SIP 128
-
-/* IS2 full key - common */
-#define IS2_FKO_TYPE 0
-#define IS2_FKL_TYPE 2
-#define IS2_FKO_FIRST (IS2_FKO_TYPE + IS2_FKL_TYPE)
-#define IS2_FKL_FIRST 1
-#define IS2_FKO_PAG (IS2_FKO_FIRST + IS2_FKL_FIRST)
-#define IS2_FKL_PAG 8
-#define IS2_FKO_IGR_PORT_MASK (IS2_FKO_PAG + IS2_FKL_PAG)
-#define IS2_FKL_IGR_PORT_MASK (VCAP_PORT_CNT + 1)
-#define IS2_FKO_SERVICE_FRM (IS2_FKO_IGR_PORT_MASK + IS2_FKL_IGR_PORT_MASK)
-#define IS2_FKL_SERVICE_FRM 1
-#define IS2_FKO_HOST_MATCH (IS2_FKO_SERVICE_FRM + IS2_FKL_SERVICE_FRM)
-#define IS2_FKL_HOST_MATCH 1
-#define IS2_FKO_L2_MC (IS2_FKO_HOST_MATCH + IS2_FKL_HOST_MATCH)
-#define IS2_FKL_L2_MC 1
-#define IS2_FKO_L2_BC (IS2_FKO_L2_MC + IS2_FKL_L2_MC)
-#define IS2_FKL_L2_BC 1
-#define IS2_FKO_VLAN_TAGGED (IS2_FKO_L2_BC + IS2_FKL_L2_BC)
-#define IS2_FKL_VLAN_TAGGED 1
-#define IS2_FKO_VID (IS2_FKO_VLAN_TAGGED + IS2_FKL_VLAN_TAGGED)
-#define IS2_FKL_VID 12
-#define IS2_FKO_DEI (IS2_FKO_VID + IS2_FKL_VID)
-#define IS2_FKL_DEI 1
-#define IS2_FKO_PCP (IS2_FKO_DEI + IS2_FKL_DEI)
-#define IS2_FKL_PCP 3
-
-/* IS2 full key - IP6_TCP_UDP/IP6_OTHER common */
-#define IS2_FKO_L3_TTL_GT0 (IS2_FKO_PCP + IS2_FKL_PCP)
-#define IS2_FKL_L3_TTL_GT0 1
-#define IS2_FKO_L3_TOS (IS2_FKO_L3_TTL_GT0 + IS2_FKL_L3_TTL_GT0)
-#define IS2_FKL_L3_TOS 8
-#define IS2_FKO_L3_IP6_DIP (IS2_FKO_L3_TOS + IS2_FKL_L3_TOS)
-#define IS2_FKL_L3_IP6_DIP 128
-#define IS2_FKO_L3_IP6_SIP (IS2_FKO_L3_IP6_DIP + IS2_FKL_L3_IP6_DIP)
-#define IS2_FKL_L3_IP6_SIP 128
-#define IS2_FKO_DIP_EQ_SIP (IS2_FKO_L3_IP6_SIP + IS2_FKL_L3_IP6_SIP)
-#define IS2_FKL_DIP_EQ_SIP 1
-
-/* IS2 full key - IP6_TCP_UDP */
-#define IS2_FKO_IP6_TCP_UDP_TCP (IS2_FKO_DIP_EQ_SIP + IS2_FKL_DIP_EQ_SIP)
-#define IS2_FKL_IP6_TCP_UDP_TCP 1
-#define IS2_FKO_IP6_TCP_UDP_L4_DPORT \
- (IS2_FKO_IP6_TCP_UDP_TCP + IS2_FKL_IP6_TCP_UDP_TCP)
-#define IS2_FKL_IP6_TCP_UDP_L4_DPORT 16
-#define IS2_FKO_IP6_TCP_UDP_L4_SPORT \
- (IS2_FKO_IP6_TCP_UDP_L4_DPORT + IS2_FKL_IP6_TCP_UDP_L4_DPORT)
-#define IS2_FKL_IP6_TCP_UDP_L4_SPORT 16
-#define IS2_FKO_IP6_TCP_UDP_L4_RNG \
- (IS2_FKO_IP6_TCP_UDP_L4_SPORT + IS2_FKL_IP6_TCP_UDP_L4_SPORT)
-#define IS2_FKL_IP6_TCP_UDP_L4_RNG 8
-#define IS2_FKO_IP6_TCP_UDP_SPORT_EQ_DPORT \
- (IS2_FKO_IP6_TCP_UDP_L4_RNG + IS2_FKL_IP6_TCP_UDP_L4_RNG)
-#define IS2_FKL_IP6_TCP_UDP_SPORT_EQ_DPORT 1
-#define IS2_FKO_IP6_TCP_UDP_SEQUENCE_EQ0 \
- (IS2_FKO_IP6_TCP_UDP_SPORT_EQ_DPORT + \
- IS2_FKL_IP6_TCP_UDP_SPORT_EQ_DPORT)
-#define IS2_FKL_IP6_TCP_UDP_SEQUENCE_EQ0 1
-#define IS2_FKO_IP6_TCP_UDP_L4_FIN \
- (IS2_FKO_IP6_TCP_UDP_SEQUENCE_EQ0 + IS2_FKL_IP6_TCP_UDP_SEQUENCE_EQ0)
-#define IS2_FKL_IP6_TCP_UDP_L4_FIN 1
-#define IS2_FKO_IP6_TCP_UDP_L4_SYN \
- (IS2_FKO_IP6_TCP_UDP_L4_FIN + IS2_FKL_IP6_TCP_UDP_L4_FIN)
-#define IS2_FKL_IP6_TCP_UDP_L4_SYN 1
-#define IS2_FKO_IP6_TCP_UDP_L4_RST \
- (IS2_FKO_IP6_TCP_UDP_L4_SYN + IS2_FKL_IP6_TCP_UDP_L4_SYN)
-#define IS2_FKL_IP6_TCP_UDP_L4_RST 1
-#define IS2_FKO_IP6_TCP_UDP_L4_PSH \
- (IS2_FKO_IP6_TCP_UDP_L4_RST + IS2_FKL_IP6_TCP_UDP_L4_RST)
-#define IS2_FKL_IP6_TCP_UDP_L4_PSH 1
-#define IS2_FKO_IP6_TCP_UDP_L4_ACK \
- (IS2_FKO_IP6_TCP_UDP_L4_PSH + IS2_FKL_IP6_TCP_UDP_L4_PSH)
-#define IS2_FKL_IP6_TCP_UDP_L4_ACK 1
-#define IS2_FKO_IP6_TCP_UDP_L4_URG \
- (IS2_FKO_IP6_TCP_UDP_L4_ACK + IS2_FKL_IP6_TCP_UDP_L4_ACK)
-#define IS2_FKL_IP6_TCP_UDP_L4_URG 1
-#define IS2_FKO_IP6_TCP_UDP_L4_1588_DOM \
- (IS2_FKO_IP6_TCP_UDP_L4_URG + IS2_FKL_IP6_TCP_UDP_L4_URG)
-#define IS2_FKL_IP6_TCP_UDP_L4_1588_DOM 8
-#define IS2_FKO_IP6_TCP_UDP_L4_1588_VER \
- (IS2_FKO_IP6_TCP_UDP_L4_1588_DOM + IS2_FKL_IP6_TCP_UDP_L4_1588_DOM)
-#define IS2_FKL_IP6_TCP_UDP_L4_1588_VER 4
-
-/* IS2 full key - IP6_OTHER */
-#define IS2_FKO_IP6_OTHER_L3_PROTO IS2_FKO_IP6_TCP_UDP_TCP
-#define IS2_FKL_IP6_OTHER_L3_PROTO 8
-#define IS2_FKO_IP6_OTHER_L3_PAYLOAD \
- (IS2_FKO_IP6_OTHER_L3_PROTO + IS2_FKL_IP6_OTHER_L3_PROTO)
-#define IS2_FKL_IP6_OTHER_L3_PAYLOAD 56
-
-/* IS2 full key - CUSTOM */
-#define IS2_FKO_CUSTOM_CUSTOM_TYPE IS2_FKO_L3_TTL_GT0
-#define IS2_FKL_CUSTOM_CUSTOM_TYPE 1
-#define IS2_FKO_CUSTOM_CUSTOM \
- (IS2_FKO_CUSTOM_CUSTOM_TYPE + IS2_FKL_CUSTOM_CUSTOM_TYPE)
-#define IS2_FKL_CUSTOM_CUSTOM 320
-
-/* IS2 action - BASE_TYPE */
-#define IS2_AO_HIT_ME_ONCE 0
-#define IS2_AL_HIT_ME_ONCE 1
-#define IS2_AO_CPU_COPY_ENA (IS2_AO_HIT_ME_ONCE + IS2_AL_HIT_ME_ONCE)
-#define IS2_AL_CPU_COPY_ENA 1
-#define IS2_AO_CPU_QU_NUM (IS2_AO_CPU_COPY_ENA + IS2_AL_CPU_COPY_ENA)
-#define IS2_AL_CPU_QU_NUM 3
-#define IS2_AO_MASK_MODE (IS2_AO_CPU_QU_NUM + IS2_AL_CPU_QU_NUM)
-#define IS2_AL_MASK_MODE 2
-#define IS2_AO_MIRROR_ENA (IS2_AO_MASK_MODE + IS2_AL_MASK_MODE)
-#define IS2_AL_MIRROR_ENA 1
-#define IS2_AO_LRN_DIS (IS2_AO_MIRROR_ENA + IS2_AL_MIRROR_ENA)
-#define IS2_AL_LRN_DIS 1
-#define IS2_AO_POLICE_ENA (IS2_AO_LRN_DIS + IS2_AL_LRN_DIS)
-#define IS2_AL_POLICE_ENA 1
-#define IS2_AO_POLICE_IDX (IS2_AO_POLICE_ENA + IS2_AL_POLICE_ENA)
-#define IS2_AL_POLICE_IDX 9
-#define IS2_AO_POLICE_VCAP_ONLY (IS2_AO_POLICE_IDX + IS2_AL_POLICE_IDX)
-#define IS2_AL_POLICE_VCAP_ONLY 1
-#define IS2_AO_PORT_MASK (IS2_AO_POLICE_VCAP_ONLY + IS2_AL_POLICE_VCAP_ONLY)
-#define IS2_AL_PORT_MASK VCAP_PORT_CNT
-#define IS2_AO_REW_OP (IS2_AO_PORT_MASK + IS2_AL_PORT_MASK)
-#define IS2_AL_REW_OP 9
-#define IS2_AO_LM_CNT_DIS (IS2_AO_REW_OP + IS2_AL_REW_OP)
-#define IS2_AL_LM_CNT_DIS 1
-#define IS2_AO_ISDX_ENA \
- (IS2_AO_LM_CNT_DIS + IS2_AL_LM_CNT_DIS + 1) /* Reserved bit */
-#define IS2_AL_ISDX_ENA 1
-#define IS2_AO_ACL_ID (IS2_AO_ISDX_ENA + IS2_AL_ISDX_ENA)
-#define IS2_AL_ACL_ID 6
-
-/* IS2 action - SMAC_SIP */
-#define IS2_AO_SMAC_SIP_CPU_COPY_ENA 0
-#define IS2_AL_SMAC_SIP_CPU_COPY_ENA 1
-#define IS2_AO_SMAC_SIP_CPU_QU_NUM 1
-#define IS2_AL_SMAC_SIP_CPU_QU_NUM 3
-#define IS2_AO_SMAC_SIP_FWD_KILL_ENA 4
-#define IS2_AL_SMAC_SIP_FWD_KILL_ENA 1
-#define IS2_AO_SMAC_SIP_HOST_MATCH 5
-#define IS2_AL_SMAC_SIP_HOST_MATCH 1
-
-#endif /* _OCELOT_VCAP_H_ */
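
The header removed above builds each IS2 field position by chaining offset and length macros: every IS2_HKO_* offset is the previous field's offset plus that field's IS2_HKL_* length. A minimal sketch of the same idea expressed as a table walked at runtime; all names below are hypothetical illustrations, not part of the driver:

/* offset of field idx = sum of the lengths of all preceding fields */
struct vcap_field {
	const char *name;
	unsigned int length;	/* width in bits */
};

static const struct vcap_field is2_oam_fields[] = {
	{ "OAM_VER",          5 },
	{ "OAM_OPCODE",       8 },
	{ "OAM_FLAGS",        8 },
	{ "OAM_MEPID",       16 },
	{ "OAM_CCM_CNTS_EQ0", 1 },
};

static unsigned int vcap_field_offset(const struct vcap_field *fields,
				      unsigned int idx)
{
	unsigned int off = 0, i;

	for (i = 0; i < idx; i++)
		off += fields[i].length;
	return off;
}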
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index c06600fb47ff..4aa7346cb040 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -1207,6 +1207,10 @@ int nfp_flower_compile_action(struct nfp_app *app,
bool pkt_host = false;
u32 csum_updated = 0;
+ if (!flow_action_basic_hw_stats_types_check(&flow->rule->action,
+ extack))
+ return -EOPNOTSUPP;
+
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
nfp_flow->meta.act_len = 0;
tun_type = NFP_FL_TUNNEL_NONE;
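
The new check rejects a rule up front when its actions request hardware statistics types the driver cannot honor, and reports the reason through the netlink extended ACK. A minimal sketch of the pattern with a hypothetical driver callback; flow_action_basic_hw_stats_types_check() and the -EOPNOTSUPP convention are taken from the hunk above:

/* Bail out early, before translating any actions for the hardware. */
static int example_compile_actions(struct flow_rule *rule,
				   struct netlink_ext_ack *extack)
{
	if (!flow_action_basic_hw_stats_types_check(&rule->action, extack))
		return -EOPNOTSUPP;	/* helper fills in the extack message */

	/* ... translate each flow_action_entry for the hardware ... */
	return 0;
}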
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index d648e32c0520..2779f1526d1e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1343,26 +1343,6 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
unsigned int factor;
- if (ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_coalesce_usecs_irq ||
- ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_rx_coalesce ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -EOPNOTSUPP;
-
/* Compute factor used to convert coalesce '_usecs' parameters to
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
* count.
@@ -1476,6 +1456,8 @@ static int nfp_net_set_channels(struct net_device *netdev,
}
static const struct ethtool_ops nfp_net_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = nfp_net_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
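
With supported_coalesce_params declared in the ethtool_ops, the ethtool core screens the struct ethtool_coalesce fields before the driver's set_coalesce() runs, which is why the long open-coded reject-list above could be deleted. A minimal sketch, assuming only usecs and max-frames support as in this driver:

/* The core rejects any coalesce field not covered by these flags,
 * so set_coalesce() only ever sees parameters it can act on.
 */
static const struct ethtool_ops example_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	/* .set_coalesce handles rx/tx usecs and max frames only */
};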
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index b454db283aef..8fde6c1f681b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1247,19 +1247,16 @@ static void nfp6000_free(struct nfp_cpp *cpp)
static int nfp6000_read_serial(struct device *dev, u8 *serial)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int pos;
- u32 reg;
+ u64 dsn;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (!pos) {
+ dsn = pci_get_dsn(pdev);
+ if (!dsn) {
dev_err(dev, "can't find PCIe Serial Number Capability\n");
return -EINVAL;
}
- pci_read_config_dword(pdev, pos + 4, &reg);
- put_unaligned_be16(reg >> 16, serial + 4);
- pci_read_config_dword(pdev, pos + 8, &reg);
- put_unaligned_be32(reg, serial);
+ put_unaligned_be32((u32)(dsn >> 32), serial);
+ put_unaligned_be16((u16)(dsn >> 16), serial + 4);
return 0;
}
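
pci_get_dsn() returns the two Device Serial Number capability dwords folded into a single u64, so the byte scatter above replaces the two explicit config-space reads. A sketch of the resulting layout, inferred from the two put_unaligned calls above (helper name hypothetical):

/* serial[0..3] take DSN bits 63:32, serial[4..5] take bits 31:16 */
static void example_dsn_to_serial(u64 dsn, u8 *serial)
{
	serial[0] = dsn >> 56;
	serial[1] = dsn >> 48;
	serial[2] = dsn >> 40;
	serial[3] = dsn >> 32;
	serial[4] = dsn >> 24;
	serial[5] = dsn >> 16;
}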
@@ -1267,18 +1264,15 @@ static int nfp6000_read_serial(struct device *dev, u8 *serial)
static int nfp6000_get_interface(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int pos;
- u32 reg;
+ u64 dsn;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (!pos) {
+ dsn = pci_get_dsn(pdev);
+ if (!dsn) {
dev_err(dev, "can't find PCIe Serial Number Capability\n");
return -EINVAL;
}
- pci_read_config_dword(pdev, pos + 4, &reg);
-
- return reg & 0xffff;
+ return dsn & 0xffff;
}
static const struct nfp_cpp_operations nfp6000_pcie_ops = {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index bb106a32f416..23ccc0da2341 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -12,12 +12,12 @@ struct ionic_lif;
#define IONIC_DRV_NAME "ionic"
#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver"
-#define IONIC_DRV_VERSION "0.20.0-k"
#define PCI_VENDOR_ID_PENSANDO 0x1dd8
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
+#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT 0x1004
#define DEVCMD_TIMEOUT 10
@@ -42,6 +42,7 @@ struct ionic {
struct dentry *dentry;
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
+ bool is_mgmt_nic;
struct ionic_identity ident;
struct list_head lifs;
struct ionic_lif *master_lif;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 448d7b23b2f7..60fc191a35e5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -15,6 +15,7 @@
static const struct pci_device_id ionic_id_table[] = {
{ PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF) },
{ PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF) },
+ { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT) },
{ 0, } /* end of table */
};
MODULE_DEVICE_TABLE(pci, ionic_id_table);
@@ -37,6 +38,9 @@ int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs)
void ionic_bus_free_irq_vectors(struct ionic *ionic)
{
+ if (!ionic->nintrs)
+ return;
+
pci_free_irq_vectors(ionic->pdev);
}
@@ -221,6 +225,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, ionic);
mutex_init(&ionic->dev_cmd_lock);
+ ionic->is_mgmt_nic =
+ ent->device == PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT;
+
/* Query system for DMA addressing limitation for the device. */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN));
if (err) {
@@ -245,6 +252,8 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
+ if (!ionic->is_mgmt_nic)
+ pcie_print_link_status(pdev);
err = ionic_map_bars(ionic);
if (err)
@@ -346,6 +355,11 @@ err_out_reset:
ionic_reset(ionic);
err_out_teardown:
ionic_dev_teardown(ionic);
+ /* Don't fail the probe for these errors, keep
+ * the hw interface around for inspection
+ */
+ return 0;
+
err_out_unmap_bars:
ionic_unmap_bars(ionic);
pci_release_regions(pdev);
@@ -369,11 +383,14 @@ static void ionic_remove(struct pci_dev *pdev)
if (!ionic)
return;
- ionic_devlink_unregister(ionic);
- ionic_lifs_unregister(ionic);
- ionic_lifs_deinit(ionic);
- ionic_lifs_free(ionic);
- ionic_bus_free_irq_vectors(ionic);
+ if (ionic->master_lif) {
+ ionic_devlink_unregister(ionic);
+ ionic_lifs_unregister(ionic);
+ ionic_lifs_deinit(ionic);
+ ionic_lifs_free(ionic);
+ ionic_bus_free_irq_vectors(ionic);
+ }
+
ionic_port_reset(ionic);
ionic_reset(ionic);
ionic_dev_teardown(ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index 6fb27dcc5787..ed14164468a1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -82,7 +82,7 @@ int ionic_devlink_register(struct ionic *ionic)
err = devlink_port_register(dl, &ionic->dl_port, 0);
if (err)
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
- else
+ else if (!ionic->is_mgmt_nic)
devlink_port_type_eth_set(&ionic->dl_port,
ionic->master_lif->netdev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index f778fff034f5..a233716eac29 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -86,7 +86,6 @@ static void ionic_get_drvinfo(struct net_device *netdev,
struct ionic *ionic = lif->ionic;
strlcpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, IONIC_DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, ionic_bus_info(ionic),
@@ -412,28 +411,6 @@ static int ionic_set_coalesce(struct net_device *netdev,
unsigned int i;
u32 coal;
- if (coalesce->rx_max_coalesced_frames ||
- coalesce->rx_coalesce_usecs_irq ||
- coalesce->rx_max_coalesced_frames_irq ||
- coalesce->tx_max_coalesced_frames ||
- coalesce->tx_coalesce_usecs_irq ||
- coalesce->tx_max_coalesced_frames_irq ||
- coalesce->stats_block_coalesce_usecs ||
- coalesce->use_adaptive_rx_coalesce ||
- coalesce->use_adaptive_tx_coalesce ||
- coalesce->pkt_rate_low ||
- coalesce->rx_coalesce_usecs_low ||
- coalesce->rx_max_coalesced_frames_low ||
- coalesce->tx_coalesce_usecs_low ||
- coalesce->tx_max_coalesced_frames_low ||
- coalesce->pkt_rate_high ||
- coalesce->rx_coalesce_usecs_high ||
- coalesce->rx_max_coalesced_frames_high ||
- coalesce->tx_coalesce_usecs_high ||
- coalesce->tx_max_coalesced_frames_high ||
- coalesce->rate_sample_interval)
- return -EINVAL;
-
ident = &lif->ionic->ident;
if (ident->dev.intr_coal_div == 0) {
netdev_warn(netdev, "bad HW value in dev.intr_coal_div = %d\n",
@@ -462,7 +439,7 @@ static int ionic_set_coalesce(struct net_device *netdev,
if (coal != lif->rx_coalesce_hw) {
lif->rx_coalesce_hw = coal;
- if (test_bit(IONIC_LIF_UP, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
qcq = lif->rxqcqs[i].qcq;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
@@ -509,11 +486,11 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;
- running = test_bit(IONIC_LIF_UP, lif->state);
+ running = test_bit(IONIC_LIF_F_UP, lif->state);
if (running)
ionic_stop(netdev);
@@ -522,7 +499,7 @@ static int ionic_set_ringparam(struct net_device *netdev,
if (running)
ionic_open(netdev);
- clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+ clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return 0;
}
@@ -553,11 +530,11 @@ static int ionic_set_channels(struct net_device *netdev,
if (ch->combined_count == lif->nxqs)
return 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;
- running = test_bit(IONIC_LIF_UP, lif->state);
+ running = test_bit(IONIC_LIF_F_UP, lif->state);
if (running)
ionic_stop(netdev);
@@ -565,7 +542,7 @@ static int ionic_set_channels(struct net_device *netdev,
if (running)
ionic_open(netdev);
- clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+ clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return 0;
}
@@ -575,7 +552,7 @@ static u32 ionic_get_priv_flags(struct net_device *netdev)
struct ionic_lif *lif = netdev_priv(netdev);
u32 priv_flags = 0;
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state))
+ if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
priv_flags |= PRIV_F_SW_DBG_STATS;
return priv_flags;
@@ -584,14 +561,10 @@ static u32 ionic_get_priv_flags(struct net_device *netdev)
static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct ionic_lif *lif = netdev_priv(netdev);
- u32 flags = lif->flags;
- clear_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state);
+ clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
if (priv_flags & PRIV_F_SW_DBG_STATS)
- set_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state);
-
- if (flags != lif->flags)
- lif->flags = flags;
+ set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
return 0;
}
@@ -784,6 +757,7 @@ static int ionic_nway_reset(struct net_device *netdev)
}
static const struct ethtool_ops ionic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ionic_get_drvinfo,
.get_regs_len = ionic_get_regs_len,
.get_regs = ionic_get_regs,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 54547d53b0f2..77f607a66cd7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -4,8 +4,6 @@
#ifndef _IONIC_IF_H_
#define _IONIC_IF_H_
-#pragma pack(push, 1)
-
#define IONIC_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */
#define IONIC_DEV_INFO_VERSION 1
#define IONIC_IFNAMSIZ 16
@@ -366,7 +364,7 @@ union ionic_lif_config {
u8 rsvd2[2];
__le64 features;
__le32 queue_count[IONIC_QTYPE_MAX];
- };
+ } __packed;
__le32 words[64];
};
@@ -417,7 +415,7 @@ union ionic_lif_identity {
__le32 max_frame_size;
u8 rsvd2[106];
union ionic_lif_config config;
- } eth;
+ } __packed eth;
struct {
u8 version;
@@ -439,8 +437,8 @@ union ionic_lif_identity {
struct ionic_lif_logical_qtype rq_qtype;
struct ionic_lif_logical_qtype cq_qtype;
struct ionic_lif_logical_qtype eq_qtype;
- } rdma;
- };
+ } __packed rdma;
+ } __packed;
__le32 words[512];
};
@@ -526,7 +524,7 @@ struct ionic_q_init_cmd {
__le64 sg_ring_base;
__le32 eq_index;
u8 rsvd2[16];
-};
+} __packed;
/**
* struct ionic_q_init_comp - Queue init command completion
@@ -1095,7 +1093,7 @@ struct ionic_port_status {
u8 status;
u8 rsvd[51];
struct ionic_xcvr_status xcvr;
-};
+} __packed;
/**
* struct ionic_port_identify_cmd - Port identify command
@@ -1251,7 +1249,7 @@ struct ionic_port_getattr_comp {
u8 pause_type;
u8 loopback_mode;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1319,7 +1317,7 @@ struct ionic_dev_setattr_cmd {
char name[IONIC_IFNAMSIZ];
__le64 features;
u8 rsvd2[60];
- };
+ } __packed;
};
/**
@@ -1334,7 +1332,7 @@ struct ionic_dev_setattr_comp {
union {
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1361,7 +1359,7 @@ struct ionic_dev_getattr_comp {
union {
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1426,7 +1424,7 @@ struct ionic_lif_setattr_cmd {
} rss;
u8 stats_ctl;
u8 rsvd[60];
- };
+ } __packed;
};
/**
@@ -1444,7 +1442,7 @@ struct ionic_lif_setattr_comp {
union {
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1483,7 +1481,7 @@ struct ionic_lif_getattr_comp {
u8 mac[6];
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1688,7 +1686,7 @@ struct ionic_vf_setattr_cmd {
u8 linkstate;
__le64 stats_pa;
u8 pad[60];
- };
+ } __packed;
};
struct ionic_vf_setattr_comp {
@@ -1726,7 +1724,7 @@ struct ionic_vf_getattr_comp {
u8 linkstate;
__le64 stats_pa;
u8 pad[11];
- };
+ } __packed;
u8 color;
};
@@ -2472,7 +2470,7 @@ union ionic_dev_cmd_regs {
union ionic_dev_cmd_comp comp;
u8 rsvd[48];
u32 data[478];
- };
+ } __packed;
u32 words[512];
};
@@ -2485,7 +2483,7 @@ union ionic_dev_regs {
struct {
union ionic_dev_info_regs info;
union ionic_dev_cmd_regs devcmd;
- };
+ } __packed;
__le32 words[1024];
};
@@ -2575,6 +2573,4 @@ struct ionic_identity {
union ionic_qos_identity qos;
};
-#pragma pack(pop)
-
#endif /* _IONIC_IF_H_ */
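
Dropping the file-wide #pragma pack(push, 1) in favor of per-struct __packed keeps natural alignment for everything that does not need byte packing. An illustrative pair of hypothetical structs showing the difference:

struct example_unpacked {
	u8 a;
	__le32 b;	/* 3 bytes of padding before b, sizeof == 8 */
};

struct example_packed {
	u8 a;
	__le32 b;	/* no padding, sizeof == 5 */
} __packed;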
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 191271f6260d..b903016193df 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -84,7 +84,7 @@ static void ionic_link_status_check(struct ionic_lif *lif)
netdev_info(netdev, "Link up - %d Gbps\n",
le32_to_cpu(lif->info->status.link_speed) / 1000);
- if (test_bit(IONIC_LIF_UP, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
netif_tx_wake_all_queues(lif->netdev);
netif_carrier_on(netdev);
}
@@ -93,12 +93,12 @@ static void ionic_link_status_check(struct ionic_lif *lif)
/* carrier off first to avoid watchdog timeout */
netif_carrier_off(netdev);
- if (test_bit(IONIC_LIF_UP, lif->state))
+ if (test_bit(IONIC_LIF_F_UP, lif->state))
netif_tx_stop_all_queues(netdev);
}
link_out:
- clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state);
+ clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}
static void ionic_link_status_check_request(struct ionic_lif *lif)
@@ -106,7 +106,7 @@ static void ionic_link_status_check_request(struct ionic_lif *lif)
struct ionic_deferred_work *work;
/* we only need one request outstanding at a time */
- if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state))
+ if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
return;
if (in_interrupt()) {
@@ -424,8 +424,9 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
IONIC_INTR_MASK_SET);
- new->intr.cpu = new->intr.index % num_online_cpus();
- if (cpu_online(new->intr.cpu))
+ new->intr.cpu = cpumask_local_spread(new->intr.index,
+ dev_to_node(dev));
+ if (new->intr.cpu != -1)
cpumask_set_cpu(new->intr.cpu,
&new->intr.affinity_mask);
} else {
@@ -1093,6 +1094,7 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
IONIC_ETH_HW_VLAN_RX_STRIP |
IONIC_ETH_HW_VLAN_RX_FILTER;
+ u64 old_hw_features;
int err;
ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
@@ -1100,9 +1102,13 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
if (err)
return err;
+ old_hw_features = lif->hw_features;
lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
ctx.comp.lif_setattr.features);
+ if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
+ ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
+
if ((vlan_flags & features) &&
!(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
 dev_info_once(lif->ionic->dev, "NIC does not support vlan offload, likely in SmartNIC mode\n");
@@ -1149,6 +1155,10 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
netdev_features_t features;
int err;
+ /* no netdev features on the management device */
+ if (lif->ionic->is_mgmt_nic)
+ return 0;
+
/* set up what we expect to support by default */
features = NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
@@ -1356,13 +1366,15 @@ int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
.cmd.lif_setattr = {
.opcode = IONIC_CMD_LIF_SETATTR,
.attr = IONIC_LIF_ATTR_RSS,
- .rss.types = cpu_to_le16(types),
.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
},
};
unsigned int i, tbl_sz;
- lif->rss_types = types;
+ if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
+ lif->rss_types = types;
+ ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
+ }
if (key)
memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
@@ -1578,7 +1590,7 @@ int ionic_open(struct net_device *netdev)
netif_set_real_num_tx_queues(netdev, lif->nxqs);
netif_set_real_num_rx_queues(netdev, lif->nxqs);
- set_bit(IONIC_LIF_UP, lif->state);
+ set_bit(IONIC_LIF_F_UP, lif->state);
ionic_link_status_check_request(lif);
if (netif_carrier_ok(netdev))
@@ -1598,13 +1610,13 @@ int ionic_stop(struct net_device *netdev)
struct ionic_lif *lif = netdev_priv(netdev);
int err = 0;
- if (!test_bit(IONIC_LIF_UP, lif->state)) {
+ if (!test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n",
__func__, lif->name);
return 0;
}
dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name);
- clear_bit(IONIC_LIF_UP, lif->state);
+ clear_bit(IONIC_LIF_F_UP, lif->state);
/* carrier off before disabling queues to avoid watchdog timeout */
netif_carrier_off(netdev);
@@ -1688,7 +1700,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
return -EINVAL;
- down_read(&ionic->vf_op_lock);
+ down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
@@ -1698,7 +1710,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
ether_addr_copy(ionic->vfs[vf].macaddr, mac);
}
- up_read(&ionic->vf_op_lock);
+ up_write(&ionic->vf_op_lock);
return ret;
}
@@ -1719,7 +1731,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
- down_read(&ionic->vf_op_lock);
+ down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
@@ -1730,7 +1742,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
ionic->vfs[vf].vlanid = vlan;
}
- up_read(&ionic->vf_op_lock);
+ up_write(&ionic->vf_op_lock);
return ret;
}
@@ -1871,7 +1883,7 @@ int ionic_reset_queues(struct ionic_lif *lif)
/* Put off the next watchdog timeout */
netif_trans_update(lif->netdev);
- err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;
@@ -1881,7 +1893,7 @@ int ionic_reset_queues(struct ionic_lif *lif)
if (!err && running)
ionic_open(lif->netdev);
- clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+ clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return err;
}
@@ -2048,10 +2060,10 @@ void ionic_lifs_free(struct ionic *ionic)
static void ionic_lif_deinit(struct ionic_lif *lif)
{
- if (!test_bit(IONIC_LIF_INITED, lif->state))
+ if (!test_bit(IONIC_LIF_F_INITED, lif->state))
return;
- clear_bit(IONIC_LIF_INITED, lif->state);
+ clear_bit(IONIC_LIF_F_INITED, lif->state);
ionic_rx_filters_deinit(lif);
ionic_lif_rss_deinit(lif);
@@ -2287,7 +2299,7 @@ static int ionic_lif_init(struct ionic_lif *lif)
lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
- set_bit(IONIC_LIF_INITED, lif->state);
+ set_bit(IONIC_LIF_F_INITED, lif->state);
INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
@@ -2375,6 +2387,12 @@ int ionic_lifs_register(struct ionic *ionic)
{
int err;
+ /* the netdev is not registered on the management device; it is
+ * only used as a vehicle for napi operations on the adminq
+ */
+ if (ionic->is_mgmt_nic)
+ return 0;
+
INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
ionic->nb.notifier_call = ionic_lif_notify;
@@ -2408,6 +2426,9 @@ void ionic_lifs_unregister(struct ionic *ionic)
* current model, so don't bother searching the
* ionic->lif for candidates to unregister
*/
+ if (!ionic->master_lif)
+ return;
+
cancel_work_sync(&ionic->master_lif->deferred.work);
cancel_work_sync(&ionic->master_lif->tx_timeout_work);
if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 9c5a7dd45f9d..7c0c6fef8c0b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -121,14 +121,14 @@ struct ionic_lif_sw_stats {
};
enum ionic_lif_state_flags {
- IONIC_LIF_INITED,
- IONIC_LIF_SW_DEBUG_STATS,
- IONIC_LIF_UP,
- IONIC_LIF_LINK_CHECK_REQUESTED,
- IONIC_LIF_QUEUE_RESET,
+ IONIC_LIF_F_INITED,
+ IONIC_LIF_F_SW_DEBUG_STATS,
+ IONIC_LIF_F_UP,
+ IONIC_LIF_F_LINK_CHECK_REQUESTED,
+ IONIC_LIF_F_QUEUE_RESET,
/* leave this as last */
- IONIC_LIF_STATE_SIZE
+ IONIC_LIF_F_STATE_SIZE
};
#define IONIC_LIF_NAME_MAX_SZ 32
@@ -136,7 +136,7 @@ struct ionic_lif {
char name[IONIC_LIF_NAME_MAX_SZ];
struct list_head list;
struct net_device *netdev;
- DECLARE_BITMAP(state, IONIC_LIF_STATE_SIZE);
+ DECLARE_BITMAP(state, IONIC_LIF_F_STATE_SIZE);
struct ionic *ionic;
bool registered;
unsigned int index;
@@ -179,7 +179,6 @@ struct ionic_lif {
u32 rx_coalesce_usecs; /* what the user asked for */
u32 rx_coalesce_hw; /* what the hw is using */
- u32 flags;
struct work_struct tx_timeout_work;
};
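
The renamed flags index a bitmap sized by the sentinel enum value and are driven by the atomic bit helpers seen throughout this series. A minimal sketch of the idiom, with hypothetical names:

enum example_state_flags {
	EXAMPLE_F_INITED,
	EXAMPLE_F_UP,
	EXAMPLE_F_STATE_SIZE,	/* keep last: sizes the bitmap */
};

struct example_ctx {
	DECLARE_BITMAP(state, EXAMPLE_F_STATE_SIZE);
};

static void example_usage(struct example_ctx *ctx)
{
	set_bit(EXAMPLE_F_UP, ctx->state);	/* atomic set */
	if (test_bit(EXAMPLE_F_UP, ctx->state))
		clear_bit(EXAMPLE_F_UP, ctx->state);
}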
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index a8e3fb73b465..e4a76e66f542 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/utsname.h>
+#include <linux/vermagic.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -15,7 +16,6 @@
MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION);
MODULE_AUTHOR("Pensando Systems, Inc");
MODULE_LICENSE("GPL");
-MODULE_VERSION(IONIC_DRV_VERSION);
static const char *ionic_error_to_str(enum ionic_status_code code)
{
@@ -414,7 +414,7 @@ int ionic_identify(struct ionic *ionic)
memset(ident, 0, sizeof(*ident));
ident->drv.os_type = cpu_to_le32(IONIC_OS_TYPE_LINUX);
- strncpy(ident->drv.driver_ver_str, IONIC_DRV_VERSION,
+ strncpy(ident->drv.driver_ver_str, UTS_RELEASE,
sizeof(ident->drv.driver_ver_str) - 1);
mutex_lock(&ionic->dev_cmd_lock);
@@ -558,8 +558,6 @@ int ionic_port_reset(struct ionic *ionic)
static int __init ionic_init_module(void)
{
- pr_info("%s %s, ver %s\n",
- IONIC_DRV_NAME, IONIC_DRV_DESCRIPTION, IONIC_DRV_VERSION);
ionic_debugfs_create();
return ionic_bus_register_driver();
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index a1e9796a660a..8f2a8fb029f1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -118,8 +118,8 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
/* rx stats */
total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
/* tx debug stats */
total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
IONIC_NUM_TX_Q_STATS +
@@ -151,8 +151,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"txq_%d_%s",
@@ -190,8 +190,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"rxq_%d_cq_%s",
@@ -247,8 +247,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
txqcq = lif_to_txqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
@@ -281,8 +281,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
rxqcq = lif_to_rxqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 020acc300d7e..15ff633e81ba 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -158,7 +158,7 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
}
/* no packet processing while resetting */
- if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
+ if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) {
stats->dropped++;
return;
}
@@ -1023,7 +1023,7 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
int ndescs;
int err;
- if (unlikely(!test_bit(IONIC_LIF_UP, lif->state))) {
+ if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index d1ce4531d01a..6505f7e2d1db 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1746,7 +1746,8 @@ unlock:
}
static int qede_parse_actions(struct qede_dev *edev,
- struct flow_action *flow_action)
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
int i;
@@ -1756,6 +1757,9 @@ static int qede_parse_actions(struct qede_dev *edev,
return -EINVAL;
}
+ if (!flow_action_basic_hw_stats_types_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -1970,7 +1974,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, &f->rule->action))
+ if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -2038,7 +2042,7 @@ static int qede_flow_spec_validate(struct qede_dev *edev,
return -EINVAL;
}
- if (qede_parse_actions(edev, flow_action))
+ if (qede_parse_actions(edev, flow_action, NULL))
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index fbf4cbcf1a65..1305522f72d6 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -57,7 +57,7 @@ static int rmnet_register_real_device(struct net_device *real_dev)
if (rmnet_is_real_dev_registered(real_dev))
return 0;
- port = kzalloc(sizeof(*port), GFP_ATOMIC);
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
@@ -122,13 +122,12 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
}
real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev || !dev)
+ if (!real_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "link does not exist");
return -ENODEV;
+ }
- if (!data[IFLA_RMNET_MUX_ID])
- return -EINVAL;
-
- ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (!ep)
return -ENOMEM;
@@ -139,7 +138,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
goto err0;
port = rmnet_get_port_rtnl(real_dev);
- err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
+ err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep, extack);
if (err)
goto err1;
@@ -263,12 +262,16 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
{
u16 mux_id;
- if (!data || !data[IFLA_RMNET_MUX_ID])
+ if (!data || !data[IFLA_RMNET_MUX_ID]) {
+ NL_SET_ERR_MSG_MOD(extack, "MUX ID not specified");
return -EINVAL;
+ }
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
- if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
+ if (mux_id > (RMNET_MAX_LOGICAL_EP - 1)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid MUX ID");
return -ERANGE;
+ }
return 0;
}
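
Each validation failure now carries a human-readable reason through the extended ACK in addition to the errno, which iproute2 prints as "Error: rmnet: ...". A minimal sketch of the pattern, with a hypothetical callback:

static int example_validate(struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	if (!data) {
		/* errno says "failed"; the extack string says why */
		NL_SET_ERR_MSG_MOD(extack, "no attributes supplied");
		return -EINVAL;
	}
	return 0;
}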
@@ -406,14 +409,22 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
 /* If there is more than one rmnet dev attached, it's probably being
 * used for muxing. Skip the bridging in that case
*/
- if (port->nr_rmnet_devs > 1)
+ if (port->nr_rmnet_devs > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "more than one rmnet dev attached");
return -EINVAL;
+ }
- if (port->rmnet_mode != RMNET_EPMODE_VND)
+ if (port->rmnet_mode != RMNET_EPMODE_VND) {
+ NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
return -EINVAL;
+ }
+
+ if (rmnet_is_real_dev_registered(slave_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "slave cannot be another rmnet dev");
- if (rmnet_is_real_dev_registered(slave_dev))
return -EBUSY;
+ }
err = rmnet_register_real_device(slave_dev);
if (err)
@@ -475,4 +486,5 @@ static void __exit rmnet_exit(void)
module_init(rmnet_init)
module_exit(rmnet_exit)
+MODULE_ALIAS_RTNL_LINK("rmnet");
MODULE_LICENSE("GPL v2");
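
The new alias lets "ip link add ... type rmnet" autoload the module on demand. The macro expansion is roughly the following (see include/net/rtnetlink.h): a request for the "rmnet" link type becomes a "rtnl-link-rmnet" modprobe alias.

#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)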
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 26ad40f19c64..d58b51d277f1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -212,6 +212,8 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->needs_free_netdev = true;
rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
+ rmnet_dev->features |= NETIF_F_LLTX;
+
/* This perm addr will be used as interface identifier by IPv6 */
rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
eth_random_addr(rmnet_dev->perm_addr);
@@ -222,16 +224,17 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
- struct rmnet_endpoint *ep)
+ struct rmnet_endpoint *ep,
+ struct netlink_ext_ack *extack)
{
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
int rc;
- if (ep->egress_dev)
- return -EINVAL;
-
- if (rmnet_get_endpoint(port, id))
+ if (rmnet_get_endpoint(port, id)) {
+ NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
return -EBUSY;
+ }
rmnet_dev->hw_features = NETIF_F_RXCSUM;
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
index 14d77c709d4a..4967f3461ed1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -11,7 +11,8 @@ int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
- struct rmnet_endpoint *ep);
+ struct rmnet_endpoint *ep,
+ struct netlink_ext_ack *extack);
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index f081007a245b..ce030e093485 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3889,6 +3889,7 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
+ desc->opts2 = 0;
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
@@ -3970,17 +3971,15 @@ static int rtl8169_init_ring(struct rtl8169_private *tp)
return rtl8169_rx_fill(tp);
}
-static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
- struct TxDesc *desc)
+static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry)
{
- unsigned int len = tx_skb->len;
+ struct ring_info *tx_skb = tp->tx_skb + entry;
+ struct TxDesc *desc = tp->TxDescArray + entry;
- dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
-
- desc->opts1 = 0x00;
- desc->opts2 = 0x00;
- desc->addr = 0x00;
- tx_skb->len = 0;
+ dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len,
+ DMA_TO_DEVICE);
+ memset(desc, 0, sizeof(*desc));
+ memset(tx_skb, 0, sizeof(*tx_skb));
}
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
@@ -3996,12 +3995,9 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
if (len) {
struct sk_buff *skb = tx_skb->skb;
- rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
- tp->TxDescArray + entry);
- if (skb) {
+ rtl8169_unmap_tx_skb(tp, entry);
+ if (skb)
dev_consume_skb_any(skb);
- tx_skb->skb = NULL;
- }
}
}
}
@@ -4308,7 +4304,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
err_dma_1:
- rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
+ rtl8169_unmap_tx_skb(tp, entry);
err_dma_0:
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
@@ -4357,13 +4353,15 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
- u16 pci_status, pci_cmd;
+ int pci_status_errs;
+ u16 pci_cmd;
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
- pci_read_config_word(pdev, PCI_STATUS, &pci_status);
- netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
- pci_cmd, pci_status);
+ pci_status_errs = pci_status_get_and_clear_errors(pdev);
+
+ netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
+ pci_cmd, pci_status_errs);
/*
 * The recovery sequence below calls for a rather elaborate explanation:
@@ -4380,11 +4378,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- pci_write_config_word(pdev, PCI_STATUS,
- pci_status & (PCI_STATUS_DETECTED_PARITY |
- PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
-
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
@@ -4395,33 +4388,24 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
dirty_tx = tp->dirty_tx;
smp_rmb();
- tx_left = tp->cur_tx - dirty_tx;
- while (tx_left > 0) {
+ for (tx_left = tp->cur_tx - dirty_tx; tx_left > 0; tx_left--) {
unsigned int entry = dirty_tx % NUM_TX_DESC;
- struct ring_info *tx_skb = tp->tx_skb + entry;
+ struct sk_buff *skb = tp->tx_skb[entry].skb;
u32 status;
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
if (status & DescOwn)
break;
- /* This barrier is needed to keep us from reading
- * any other fields out of the Tx descriptor until
- * we know the status of DescOwn
- */
- dma_rmb();
+ rtl8169_unmap_tx_skb(tp, entry);
- rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
- tp->TxDescArray + entry);
- if (tx_skb->skb) {
+ if (skb) {
pkts_compl++;
- bytes_compl += tx_skb->skb->len;
- napi_consume_skb(tx_skb->skb, budget);
- tx_skb->skb = NULL;
+ bytes_compl += skb->len;
+ napi_consume_skb(skb, budget);
}
dirty_tx++;
- tx_left--;
}
if (tp->dirty_tx != dirty_tx) {
@@ -4560,7 +4544,6 @@ process_pkt:
u64_stats_update_end(&tp->rx_stats.syncp);
}
release_descriptor:
- desc->opts2 = 0;
rtl8169_mark_to_asic(desc);
}
@@ -4844,6 +4827,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
pm_runtime_get_noresume(&pdev->dev);
+ netdev_stats_to_stats64(stats, &dev->stats);
+
do {
start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
stats->rx_packets = tp->rx_stats.packets;
@@ -4856,14 +4841,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_bytes = tp->tx_stats.bytes;
} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
- stats->rx_dropped = dev->stats.rx_dropped;
- stats->tx_dropped = dev->stats.tx_dropped;
- stats->rx_length_errors = dev->stats.rx_length_errors;
- stats->rx_errors = dev->stats.rx_errors;
- stats->rx_crc_errors = dev->stats.rx_crc_errors;
- stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
- stats->multicast = dev->stats.multicast;
-
/*
* Fetch additional counter values missing in stats collected by driver
* from tally counters.
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index c705743d69f7..2cc8184b7e6b 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -2277,7 +2277,7 @@ static int __init sxgbe_cmdline_opt(char *str)
if (!str || !*str)
return -EINVAL;
while ((opt = strsep(&str, ",")) != NULL) {
- if (!strncmp(opt, "eee_timer:", 6)) {
+ if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
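
The old length of 6 compared only "eee_ti", so any option sharing that prefix was parsed as eee_timer; matching the full 10-character prefix fixes it. A sketch of a less fragile way to write the same match, deriving the length from the literal so the two cannot drift apart (function name hypothetical):

static int example_parse(const char *opt, int *eee_timer)
{
	static const char prefix[] = "eee_timer:";

	/* sizeof(prefix) - 1 always tracks the literal's length */
	if (!strncmp(opt, prefix, sizeof(prefix) - 1))
		return kstrtoint(opt + sizeof(prefix) - 1, 0, eee_timer);
	return -EINVAL;
}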
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 52113b7529d6..3f16bd807c6e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2853,11 +2853,24 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
}
/* Transmit timestamps are only available for 8XXX series. They result
- * in three events per packet. These occur in order, and are:
- * - the normal completion event
+ * in up to three events per packet. These occur in order, and are:
+ * - the normal completion event (may be omitted)
* - the low part of the timestamp
* - the high part of the timestamp
*
+ * It's possible for multiple completion events to appear before the
+ * corresponding timestamps. So we can for example get:
+ * COMP N
+ * COMP N+1
+ * TS_LO N
+ * TS_HI N
+ * TS_LO N+1
+ * TS_HI N+1
+ *
+ * In addition it's also possible for the adjacent completions to be
+ * merged, so we may not see COMP N above. As such, the completion
+ * events are not very useful here.
+ *
* Each part of the timestamp is itself split across two 16 bit
* fields in the event.
*/
@@ -2865,17 +2878,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
switch (tx_ev_type) {
case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
- /* In case of Queue flush or FLR, we might have received
- * the previous TX completion event but not the Timestamp
- * events.
- */
- if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
- efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
-
- tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
- ESF_DZ_TX_DESCR_INDX);
- tx_queue->completed_desc_ptr =
- tx_ev_desc_ptr & tx_queue->ptr_mask;
+ /* Ignore this event - see above. */
break;
case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
@@ -2887,8 +2890,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
ts_part = efx_ef10_extract_event_ts(event);
tx_queue->completed_timestamp_major = ts_part;
- efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
- tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ efx_xmit_done_single(tx_queue);
break;
default:
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index da54afaa3c44..66dcab140449 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -20,6 +20,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
extern unsigned int efx_piobuf_size;
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index d2d738314c50..c492523b986c 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -600,6 +600,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
if (tx_queue->channel)
tx_queue->channel = channel;
tx_queue->buffer = NULL;
+ tx_queue->cb_page = NULL;
memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
}
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 993b5769525b..9a637cd67f43 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -582,6 +582,7 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
case ETHTOOL_GRXFH: {
struct efx_rss_context *ctx = &efx->rss_context;
+ __u64 data;
mutex_lock(&efx->rss_lock);
if (info->flow_type & FLOW_RSS && info->rss_context) {
@@ -591,35 +592,38 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
goto out_unlock;
}
}
- info->data = 0;
+
+ data = 0;
if (!efx_rss_active(ctx)) /* No RSS */
- goto out_unlock;
+ goto out_setdata_unlock;
+
switch (info->flow_type & ~FLOW_RSS) {
case UDP_V4_FLOW:
- if (ctx->rx_hash_udp_4tuple)
- /* fall through */
- case TCP_V4_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
case UDP_V6_FLOW:
if (ctx->rx_hash_udp_4tuple)
- /* fall through */
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ else
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V4_FLOW:
case TCP_V6_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
case IPV6_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
+ data = RXH_IP_SRC | RXH_IP_DST;
break;
default:
break;
}
+out_setdata_unlock:
+ info->data = data;
out_unlock:
mutex_unlock(&efx->rss_lock);
return rc;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 392bd5b7017e..b836315bac87 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -208,8 +208,6 @@ struct efx_tx_buffer {
* avoid cache-line ping-pong between the xmit path and the
* completion path.
* @merge_events: Number of TX merged completion events
- * @completed_desc_ptr: Most recent completed pointer - only used with
- * timestamping.
* @completed_timestamp_major: Top part of the most recent tx timestamp.
* @completed_timestamp_minor: Low part of the most recent tx timestamp.
* @insert_count: Current insert pointer
@@ -269,7 +267,6 @@ struct efx_tx_queue {
unsigned int merge_events;
unsigned int bytes_compl;
unsigned int pkts_compl;
- unsigned int completed_desc_ptr;
u32 completed_timestamp_major;
u32 completed_timestamp_minor;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 696a77c20cb7..19b58563cb78 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -534,6 +534,44 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
return efx_enqueue_skb(tx_queue, skb);
}
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
+{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int read_ptr;
+ bool finished = false;
+
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+ while (!finished) {
+ struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+ if (!efx_tx_buffer_in_use(buffer)) {
+ struct efx_nic *efx = tx_queue->efx;
+
+ netif_err(efx, hw, efx->net_dev,
+ "TX queue %d spurious single TX completion\n",
+ tx_queue->queue);
+ efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ return;
+ }
+
+ /* Need to check the flag before dequeueing. */
+ if (buffer->flags & EFX_TX_BUF_SKB)
+ finished = true;
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+ ++tx_queue->read_count;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+ }
+
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
+
+ EFX_WARN_ON_PARANOID(pkts_compl != 1);
+
+ efx_xmit_done_check_empty(tx_queue);
+}
+
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index b1571e9789d0..70876df1da69 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -80,7 +80,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->xmit_more_available = false;
tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
tx_queue->channel == efx_ptp_channel(efx));
- tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
@@ -210,10 +209,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
- if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
- unlikely(buffer->len == 0)) {
+ if (!efx_tx_buffer_in_use(buffer)) {
netif_err(efx, tx_err, efx->net_dev,
- "TX queue %d spurious TX completion id %x\n",
+ "TX queue %d spurious TX completion id %d\n",
tx_queue->queue, read_ptr);
efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
@@ -226,6 +224,19 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
}
}
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
+{
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ /* Ensure that read_count is flushed. */
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+ }
+ }
+}
+
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
@@ -256,15 +267,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
netif_tx_wake_queue(tx_queue->core_txq);
}
- /* Check whether the hardware queue is now empty */
- if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
- if (tx_queue->read_count == tx_queue->old_write_count) {
- smp_mb();
- tx_queue->empty_read_count =
- tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
- }
- }
+ efx_xmit_done_check_empty(tx_queue);
}
/* Remove buffers put into a tx_queue for the current packet.
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
index f92f1fe3a87f..99cf7ce2f36c 100644
--- a/drivers/net/ethernet/sfc/tx_common.h
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -21,6 +21,12 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
unsigned int *pkts_compl,
unsigned int *bytes_compl);
+static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
+{
+ return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
+}
+
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index db6b2988e632..7305e8e86c51 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -582,40 +582,23 @@ static void ioc3_timer(struct timer_list *t)
/* Try to find a PHY. There is no apparent relation between the MII addresses
* in the SGI documentation and what we find in reality, so we simply probe
- * for the PHY. It seems IOC3 PHYs usually live on address 31. One of my
- * onboard IOC3s has the special oddity that probing doesn't seem to find it
- * yet the interface seems to work fine, so if probing fails we for now will
- * simply default to PHY 31 instead of bailing out.
+ * for the PHY.
*/
static int ioc3_mii_init(struct ioc3_private *ip)
{
- int ioc3_phy_workaround = 1;
- int i, found = 0, res = 0;
u16 word;
+ int i;
for (i = 0; i < 32; i++) {
word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1);
if (word != 0xffff && word != 0x0000) {
- found = 1;
- break; /* Found a PHY */
+ ip->mii.phy_id = i;
+ return 0;
}
}
-
- if (!found) {
- if (ioc3_phy_workaround) {
- i = 31;
- } else {
- ip->mii.phy_id = -1;
- res = -ENODEV;
- goto out;
- }
- }
-
- ip->mii.phy_id = i;
-
-out:
- return res;
+ ip->mii.phy_id = -1;
+ return -ENODEV;
}
static void ioc3_mii_start(struct ioc3_private *ip)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 338e25a6374e..9ad927f646e8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -3,6 +3,7 @@ config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
select MII
+ select MDIO_XPCS
select PAGE_POOL
select PHYLINK
select CRC32
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 487099092693..9bdbf589d93f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -15,6 +15,7 @@
#include <linux/netdevice.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
+#include <linux/mdio-xpcs.h>
#include <linux/module.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define STMMAC_VLAN_TAG_USED
@@ -446,6 +447,8 @@ struct mac_device_info {
const struct stmmac_hwtimestamp *ptp;
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
+ const struct mdio_xpcs_ops *xpcs;
+ struct mdio_xpcs_args xpcs_args;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index d0356fbd1e43..542784300620 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -24,6 +24,7 @@
static void dwmac1000_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
+ struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
int mtu = dev->mtu;
@@ -35,7 +36,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
* Broadcom tags can look like invalid LLC/SNAP packets and cause the
* hardware to truncate packets on reception.
*/
- if (netdev_uses_dsa(dev))
+ if (netdev_uses_dsa(dev) || !priv->plat->enh_desc)
value &= ~GMAC_CONTROL_ACS;
if (mtu > 1500)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index df63b0367aff..c71dd99c8abf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -577,6 +577,18 @@ struct stmmac_mmc_ops {
#define stmmac_mmc_read(__priv, __args...) \
stmmac_do_void_callback(__priv, mmc, read, __args)
+/* XPCS callbacks */
+#define stmmac_xpcs_validate(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, validate, __args)
+#define stmmac_xpcs_config(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, config, __args)
+#define stmmac_xpcs_get_state(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, get_state, __args)
+#define stmmac_xpcs_link_up(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, link_up, __args)
+#define stmmac_xpcs_probe(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, probe, __args)
+
struct stmmac_regs_off {
u32 ptp_off;
u32 mmc_off;
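
The stmmac_xpcs_* wrappers reuse the driver's existing callback dispatcher, which degrades to an error when the xpcs backend or the named method is absent, so callers need no NULL checks. The dispatcher's approximate shape, paraphrased from hwif.h:

#define stmmac_do_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
	int __result = -EINVAL; \
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \
		__result = (__priv)->hw->__module->__cname((__arg0), ##__args); \
	__result; \
})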
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index b29603ec744c..eae11c585025 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -732,20 +732,6 @@ static int stmmac_set_coalesce(struct net_device *dev,
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int rx_riwt;
- /* Check not supported parameters */
- if ((ec->rx_coalesce_usecs_irq) ||
- (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
- (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
- (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
- (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
- (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) ||
- (ec->tx_max_coalesced_frames_irq) ||
- (ec->stats_block_coalesce_usecs) ||
- (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
- return -EOPNOTSUPP;
-
if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) {
rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
@@ -914,6 +900,8 @@ static int stmmac_set_tunable(struct net_device *dev,
}
static const struct ethtool_ops stmmac_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
.get_msglevel = stmmac_ethtool_getmsglevel,
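Declaring supported_coalesce_params lets the ethtool core reject any request that sets a field outside the advertised mask, which is why the long hand-rolled parameter check above can be deleted. A rough model of that core-side filtering (field and mask names are illustrative, not the ethtool core's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define COALESCE_USECS      (1u << 0)
    #define COALESCE_MAX_FRAMES (1u << 1)

    struct coalesce_req {
            uint32_t rx_coalesce_usecs;       /* covered by COALESCE_USECS */
            uint32_t rx_max_coalesced_frames; /* covered by COALESCE_MAX_FRAMES */
            uint32_t pkt_rate_low;            /* no bit advertised here */
    };

    /* Return true only if every nonzero field is covered by the mask. */
    static bool coalesce_ok(const struct coalesce_req *req, uint32_t mask)
    {
            if (req->rx_coalesce_usecs && !(mask & COALESCE_USECS))
                    return false;
            if (req->rx_max_coalesced_frames && !(mask & COALESCE_MAX_FRAMES))
                    return false;
            if (req->pkt_rate_low)  /* never advertised by this driver */
                    return false;
            return true;
    }

    int main(void)
    {
            struct coalesce_req req = { .rx_coalesce_usecs = 100 };
            uint32_t mask = COALESCE_USECS | COALESCE_MAX_FRAMES;

            printf("%s\n", coalesce_ok(&req, mask) ? "accepted"
                                                   : "-EOPNOTSUPP");
            return 0;
    }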
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index cb7a5bad4cfe..f26699d9a050 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -858,33 +858,65 @@ static void stmmac_validate(struct phylink_config *config,
phylink_set(mask, 1000baseT_Half);
}
- bitmap_and(supported, supported, mac_supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_andnot(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mac_supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_andnot(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mac_supported);
+ linkmode_andnot(supported, supported, mask);
+
+ linkmode_and(state->advertising, state->advertising, mac_supported);
+ linkmode_andnot(state->advertising, state->advertising, mask);
+
+ /* If PCS is supported, check which modes it supports. */
+ stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
}
static void stmmac_mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state)
{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
state->link = 0;
+ stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
+}
+
+static void stmmac_mac_an_restart(struct phylink_config *config)
+{
+ /* Not Supported */
+}
+
+static void stmmac_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ priv->eee_active = false;
+ stmmac_eee_init(priv);
+ stmmac_set_eee_pls(priv, priv->hw, false);
+}
+
+static void stmmac_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
u32 ctrl;
+ stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
+
ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
ctrl &= ~priv->hw->link.speed_mask;
- if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
- switch (state->speed) {
+ if (interface == PHY_INTERFACE_MODE_USXGMII) {
+ switch (speed) {
case SPEED_10000:
ctrl |= priv->hw->link.xgmii.speed10000;
break;
@@ -898,7 +930,7 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
return;
}
} else {
- switch (state->speed) {
+ switch (speed) {
case SPEED_2500:
ctrl |= priv->hw->link.speed2500;
break;
@@ -916,46 +948,21 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
}
}
- priv->speed = state->speed;
+ priv->speed = speed;
if (priv->plat->fix_mac_speed)
- priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
- if (!state->duplex)
+ if (!duplex)
ctrl &= ~priv->hw->link.duplex;
else
ctrl |= priv->hw->link.duplex;
/* Flow Control operation */
- if (state->pause)
- stmmac_mac_flow_ctrl(priv, state->duplex);
+ if (tx_pause && rx_pause)
+ stmmac_mac_flow_ctrl(priv, duplex);
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
-}
-
-static void stmmac_mac_an_restart(struct phylink_config *config)
-{
- /* Not Supported */
-}
-
-static void stmmac_mac_link_down(struct phylink_config *config,
- unsigned int mode, phy_interface_t interface)
-{
- struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
-
- stmmac_mac_set(priv, priv->ioaddr, false);
- priv->eee_active = false;
- stmmac_eee_init(priv);
- stmmac_set_eee_pls(priv, priv->hw, false);
-}
-
-static void stmmac_mac_link_up(struct phylink_config *config,
- struct phy_device *phy,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex,
- bool tx_pause, bool rx_pause)
-{
- struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
@@ -1045,6 +1052,10 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.pcs_poll = true;
+
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
phylink = phylink_create(&priv->phylink_config, fwnode,
mode, &stmmac_phylink_mac_ops);
@@ -2689,7 +2700,8 @@ static int stmmac_open(struct net_device *dev)
int ret;
if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI) {
+ priv->hw->pcs != STMMAC_PCS_RTBI &&
+ priv->hw->xpcs == NULL) {
ret = stmmac_init_phy(dev);
if (ret) {
netdev_err(priv->dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index cfe5d8b73142..b2a707e2ef43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -382,6 +382,14 @@ int stmmac_mdio_register(struct net_device *ndev)
max_addr = PHY_MAX_ADDR;
}
+ if (mdio_bus_data->has_xpcs) {
+ priv->hw->xpcs = mdio_xpcs_get_ops();
+ if (!priv->hw->xpcs) {
+ err = -ENODEV;
+ goto bus_register_fail;
+ }
+ }
+
if (mdio_bus_data->needs_reset)
new_bus->reset = &stmmac_mdio_reset;
@@ -433,6 +441,25 @@ int stmmac_mdio_register(struct net_device *ndev)
found = 1;
}
+ /* Try to probe the XPCS by scanning all addresses. */
+ if (priv->hw->xpcs) {
+ struct mdio_xpcs_args *xpcs = &priv->hw->xpcs_args;
+ int ret, mode = priv->plat->phy_interface;
+ max_addr = PHY_MAX_ADDR;
+
+ xpcs->bus = new_bus;
+
+ for (addr = 0; addr < max_addr; addr++) {
+ xpcs->addr = addr;
+
+ ret = stmmac_xpcs_probe(priv, xpcs, mode);
+ if (!ret) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
if (!found && !mdio_node) {
dev_warn(dev, "No PHY found\n");
mdiobus_unregister(new_bus);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 2aba2673d6c3..07dbe4f5456e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -380,7 +380,7 @@ static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
int ret;
if (!priv->dev->phydev)
- return -EBUSY;
+ return -EOPNOTSUPP;
ret = phy_loopback(priv->dev->phydev, true);
if (ret)
@@ -1387,6 +1387,7 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
cls->rule = rule;
rule->action.entries[0].id = FLOW_ACTION_DROP;
+ rule->action.entries[0].hw_stats_type = FLOW_ACTION_HW_STATS_TYPE_ANY;
rule->action.num_entries = 1;
attr.dst = priv->dev->dev_addr;
@@ -1515,6 +1516,7 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
cls->rule = rule;
rule->action.entries[0].id = FLOW_ACTION_DROP;
+ rule->action.entries[0].hw_stats_type = FLOW_ACTION_HW_STATS_TYPE_ANY;
rule->action.num_entries = 1;
attr.dst = priv->dev->dev_addr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 7a01dee2f9a8..a0e6118444b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -367,7 +367,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
static int tc_parse_flow_actions(struct stmmac_priv *priv,
struct flow_action *action,
- struct stmmac_flow_entry *entry)
+ struct stmmac_flow_entry *entry,
+ struct netlink_ext_ack *extack)
{
struct flow_action_entry *act;
int i;
@@ -375,6 +376,9 @@ static int tc_parse_flow_actions(struct stmmac_priv *priv,
if (!flow_action_has_entries(action))
return -EINVAL;
+ if (!flow_action_basic_hw_stats_types_check(action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -530,7 +534,8 @@ static int tc_add_flow(struct stmmac_priv *priv,
return -ENOENT;
}
- ret = tc_parse_flow_actions(priv, &rule->action, entry);
+ ret = tc_parse_flow_actions(priv, &rule->action, entry,
+ cls->common.extack);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 6ec9163e232c..e6d1aa882fa5 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1716,34 +1716,26 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
pr_cont("\n");
if (stat & PCI_ERR_OTHER) {
- u16 cfg;
+ int pci_errs;
/* Interrogate PCI config space for the
* true cause.
*/
- pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
- netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
- if (cfg & PCI_STATUS_PARITY)
+ pci_errs = pci_status_get_and_clear_errors(cp->pdev);
+
+ netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
+ if (pci_errs & PCI_STATUS_PARITY)
netdev_err(dev, "PCI parity error detected\n");
- if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
netdev_err(dev, "PCI target abort\n");
- if (cfg & PCI_STATUS_REC_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
netdev_err(dev, "PCI master acks target abort\n");
- if (cfg & PCI_STATUS_REC_MASTER_ABORT)
+ if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
netdev_err(dev, "PCI master abort\n");
- if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
+ if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
netdev_err(dev, "PCI system error SERR#\n");
- if (cfg & PCI_STATUS_DETECTED_PARITY)
+ if (pci_errs & PCI_STATUS_DETECTED_PARITY)
netdev_err(dev, "PCI parity error\n");
-
- /* Write the error bits back to clear them. */
- cfg &= (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY);
- pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
}
/* For all PCI errors, we should reset the chip. */
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 8358064fbd48..2d392a7b179a 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -545,37 +545,25 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
}
if (pci_estat & GREG_PCIESTAT_OTHER) {
- u16 pci_cfg_stat;
+ int pci_errs;
/* Interrogate PCI config space for the
* true cause.
*/
- pci_read_config_word(gp->pdev, PCI_STATUS,
- &pci_cfg_stat);
- netdev_err(dev, "Read PCI cfg space status [%04x]\n",
- pci_cfg_stat);
- if (pci_cfg_stat & PCI_STATUS_PARITY)
+ pci_errs = pci_status_get_and_clear_errors(gp->pdev);
+ netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
+ if (pci_errs & PCI_STATUS_PARITY)
netdev_err(dev, "PCI parity error detected\n");
- if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
netdev_err(dev, "PCI target abort\n");
- if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
netdev_err(dev, "PCI master acks target abort\n");
- if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
+ if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
netdev_err(dev, "PCI master abort\n");
- if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
+ if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
netdev_err(dev, "PCI system error SERR#\n");
- if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
+ if (pci_errs & PCI_STATUS_DETECTED_PARITY)
netdev_err(dev, "PCI parity error\n");
-
- /* Write the error bits back to clear them. */
- pci_cfg_stat &= (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY);
- pci_write_config_word(gp->pdev,
- PCI_STATUS, pci_cfg_stat);
}
/* For all PCI errors, we should reset the chip. */
diff --git a/drivers/net/ethernet/tehuti/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h
index 5fc03c8eba0c..909e7296cecf 100644
--- a/drivers/net/ethernet/tehuti/tehuti.h
+++ b/drivers/net/ethernet/tehuti/tehuti.h
@@ -330,7 +330,7 @@ struct txd_desc {
u16 length;
u32 va_lo;
u32 va_hi;
- struct pbl pbl[0]; /* Fragments */
+ struct pbl pbl[]; /* Fragments */
} __packed;
/* Register region size */
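The pbl[0] being replaced above is a GNU zero-length array; the patch converts it to a C99 flexible array member, which compilers and bounds checkers understand. A minimal sketch of how such a structure is sized and used (struct names are illustrative, not tehuti's):

    #include <stdio.h>
    #include <stdlib.h>

    struct frag { unsigned int pa_lo, pa_hi, len; };

    /* C99 flexible array member, the form the [0] hack converts to. */
    struct desc {
            unsigned short length;
            struct frag frags[];
    };

    /* Allocate a descriptor with room for n trailing fragments;
     * equivalent in spirit to the kernel's struct_size() helper. */
    static struct desc *desc_alloc(size_t n)
    {
            return malloc(sizeof(struct desc) + n * sizeof(struct frag));
    }

    int main(void)
    {
            struct desc *d = desc_alloc(4);

            if (!d)
                    return 1;
            d->frags[3].len = 64;   /* last of the 4 fragments */
            printf("len=%u\n", d->frags[3].len);
            free(d);
            return 0;
    }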
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 53fb8141f1a6..4a73127e10a6 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -369,18 +369,20 @@ struct temac_local {
/* Buffer descriptors */
struct cdmac_bd *tx_bd_v;
dma_addr_t tx_bd_p;
+ u32 tx_bd_num;
struct cdmac_bd *rx_bd_v;
dma_addr_t rx_bd_p;
+ u32 rx_bd_num;
int tx_bd_ci;
- int tx_bd_next;
int tx_bd_tail;
int rx_bd_ci;
int rx_bd_tail;
/* DMA channel control setup */
- u32 tx_chnl_ctrl;
- u32 rx_chnl_ctrl;
+ u8 coalesce_count_tx;
+ u8 coalesce_delay_tx;
u8 coalesce_count_rx;
+ u8 coalesce_delay_rx;
struct delayed_work restart_work;
};
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9461acec6f70..dc022cd5bc42 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -58,8 +58,11 @@
#include "ll_temac.h"
-#define TX_BD_NUM 64
-#define RX_BD_NUM 128
+/* Descriptors defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT 64
+#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MAX 4096
+#define RX_BD_NUM_MAX 4096
/* ---------------------------------------------------------------------
* Low level register access functions
@@ -301,7 +304,7 @@ static void temac_dma_bd_release(struct net_device *ndev)
/* Reset Local Link (DMA) */
lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
if (!lp->rx_skb[i])
break;
else {
@@ -312,12 +315,12 @@ static void temac_dma_bd_release(struct net_device *ndev)
}
if (lp->rx_bd_v)
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- lp->rx_bd_v, lp->rx_bd_p);
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+ lp->rx_bd_v, lp->rx_bd_p);
if (lp->tx_bd_v)
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- lp->tx_bd_v, lp->tx_bd_p);
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+ lp->tx_bd_v, lp->tx_bd_p);
}
/**
@@ -330,33 +333,33 @@ static int temac_dma_bd_init(struct net_device *ndev)
dma_addr_t skb_dma_addr;
int i;
- lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
- GFP_KERNEL);
+ lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
+ sizeof(*lp->rx_skb), GFP_KERNEL);
if (!lp->rx_skb)
goto out;
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
- for (i = 0; i < TX_BD_NUM; i++) {
+ for (i = 0; i < lp->tx_bd_num; i++) {
lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
- + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
+ + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
}
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
- + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));
+ + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
@@ -376,21 +379,22 @@ static int temac_dma_bd_init(struct net_device *ndev)
}
/* Configure DMA channel (irq setup) */
- lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
+ lp->dma_out(lp, TX_CHNL_CTRL,
+ lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
- lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
+ lp->dma_out(lp, RX_CHNL_CTRL,
+ lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
CHNL_CTRL_IRQ_IOE |
CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
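The register writes above pack the delay into bits 31:24 and the frame count into bits 23:16 of the channel control word, which is what lets the old opaque constants be replaced by the named defaults. A quick stand-alone check of that packing (flag bits omitted):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t chnl_ctrl(uint8_t delay, uint8_t count, uint32_t flags)
    {
            return ((uint32_t)delay << 24) | ((uint32_t)count << 16) | flags;
    }

    int main(void)
    {
            /* 0x10 and 0x22 are the driver's TX defaults; the result is
             * 0x10220000, the hard-coded tx_chnl_ctrl value it replaces. */
            printf("0x%08x\n", chnl_ctrl(0x10, 0x22, 0));
            return 0;
    }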
/* Init descriptor indexes */
lp->tx_bd_ci = 0;
- lp->tx_bd_next = 0;
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- lp->rx_bd_tail = RX_BD_NUM - 1;
+ lp->rx_bd_tail = lp->rx_bd_num - 1;
/* Enable RX DMA transfers */
wmb();
@@ -785,7 +789,7 @@ static void temac_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
lp->tx_bd_ci++;
- if (lp->tx_bd_ci >= TX_BD_NUM)
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
@@ -811,7 +815,7 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
return NETDEV_TX_BUSY;
tail++;
- if (tail >= TX_BD_NUM)
+ if (tail >= lp->tx_bd_num)
tail = 0;
cur_p = &lp->tx_bd_v[tail];
@@ -826,14 +830,13 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
- dma_addr_t start_p, tail_p, skb_dma_addr;
+ dma_addr_t tail_p, skb_dma_addr;
int ii;
unsigned long num_frag;
skb_frag_t *frag;
num_frag = skb_shinfo(skb)->nr_frags;
frag = &skb_shinfo(skb)->frags[0];
- start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag + 1)) {
@@ -876,7 +879,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
- if (++lp->tx_bd_tail >= TX_BD_NUM)
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -886,7 +889,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
if (--lp->tx_bd_tail < 0)
- lp->tx_bd_tail = TX_BD_NUM - 1;
+ lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
while (--ii >= 0) {
--frag;
@@ -895,7 +898,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_size(frag),
DMA_TO_DEVICE);
if (--lp->tx_bd_tail < 0)
- lp->tx_bd_tail = TX_BD_NUM - 1;
+ lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
}
dma_unmap_single(ndev->dev.parent,
@@ -914,7 +917,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
- if (lp->tx_bd_tail >= TX_BD_NUM)
+ if (lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
skb_tx_timestamp(skb);
@@ -934,7 +937,7 @@ static int ll_temac_recv_buffers_available(struct temac_local *lp)
return 0;
available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
if (available <= 0)
- available += RX_BD_NUM;
+ available += lp->rx_bd_num;
return available;
}
@@ -1003,7 +1006,7 @@ static void ll_temac_recv(struct net_device *ndev)
ndev->stats.rx_bytes += length;
rx_bd = lp->rx_bd_ci;
- if (++lp->rx_bd_ci >= RX_BD_NUM)
+ if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0;
} while (rx_bd != lp->rx_bd_tail);
@@ -1034,7 +1037,7 @@ static void ll_temac_recv(struct net_device *ndev)
dma_addr_t skb_dma_addr;
rx_bd = lp->rx_bd_tail + 1;
- if (rx_bd >= RX_BD_NUM)
+ if (rx_bd >= lp->rx_bd_num)
rx_bd = 0;
bd = &lp->rx_bd_v[rx_bd];
@@ -1250,13 +1253,113 @@ static const struct attribute_group temac_attr_group = {
.attrs = temac_device_attrs,
};
-/* ethtool support */
+/* ---------------------------------------------------------------------
+ * ethtool support
+ */
+
+static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ ering->rx_max_pending = RX_BD_NUM_MAX;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->tx_max_pending = TX_BD_NUM_MAX;
+ ering->rx_pending = lp->rx_bd_num;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->tx_pending = lp->tx_bd_num;
+}
+
+static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (ering->rx_pending > RX_BD_NUM_MAX ||
+ ering->rx_mini_pending ||
+ ering->rx_jumbo_pending ||
+ ering->tx_pending > TX_BD_NUM_MAX)
+ return -EINVAL;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ lp->rx_bd_num = ering->rx_pending;
+ lp->tx_bd_num = ering->tx_pending;
+ return 0;
+}
+
+static int ll_temac_ethtools_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
+ ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
+ ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
+ ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
+ return 0;
+}
+
+static int ll_temac_ethtools_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
+ return -EBUSY;
+ }
+
+ if (ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->tx_max_coalesced_frames_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_rx_coalesce ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval)
+ return -EOPNOTSUPP;
+ if (ec->rx_max_coalesced_frames)
+ lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
+ if (ec->tx_max_coalesced_frames)
+ lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
+ /* With typical LocalLink clock speed of 200 MHz and
+ * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
+ */
+ if (ec->rx_coalesce_usecs)
+ lp->coalesce_delay_rx =
+ min(255U, (ec->rx_coalesce_usecs * 100) / 512);
+ if (ec->tx_coalesce_usecs)
+ lp->coalesce_delay_tx =
+ min(255U, (ec->tx_coalesce_usecs * 100) / 512);
+
+ return 0;
+}
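With the 5.12 us tick noted above (200 MHz LocalLink clock, C_PRESCALAR=1023), the usec/count conversions are just the scaling used in the two handlers. A worked round trip:

    #include <stdio.h>

    int main(void)
    {
            unsigned int usecs = 1000;      /* request ~1 ms of delay */
            unsigned int count = (usecs * 100) / 512;

            if (count > 255)                /* 8-bit delay field */
                    count = 255;
            /* prints: count=195 -> ~998 us */
            printf("count=%u -> ~%u us\n", count, (count * 512) / 100);
            return 0;
    }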
+
static const struct ethtool_ops temac_ethtool_ops = {
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_ringparam = ll_temac_ethtools_get_ringparam,
+ .set_ringparam = ll_temac_ethtools_set_ringparam,
+ .get_coalesce = ll_temac_ethtools_get_coalesce,
+ .set_coalesce = ll_temac_ethtools_set_coalesce,
};
static int temac_probe(struct platform_device *pdev)
@@ -1300,6 +1403,8 @@ static int temac_probe(struct platform_device *pdev)
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XTE_OPTION_DEFAULTS;
+ lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+ lp->tx_bd_num = TX_BD_NUM_DEFAULT;
spin_lock_init(&lp->rx_lock);
INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
@@ -1364,6 +1469,14 @@ static int temac_probe(struct platform_device *pdev)
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
+ /* Defaults for IRQ delay/coalescing setup. These are
+ * configuration values, so they do not belong in the device tree.
+ */
+ lp->coalesce_delay_tx = 0x10;
+ lp->coalesce_count_tx = 0x22;
+ lp->coalesce_delay_rx = 0xff;
+ lp->coalesce_count_rx = 0x07;
+
/* Setup LocalLink DMA */
if (temac_np) {
/* Find the DMA node, map the DMA registers, and
@@ -1402,14 +1515,6 @@ static int temac_probe(struct platform_device *pdev)
lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
- /* Use defaults for IRQ delay/coalescing setup. These
- * are configuration values, so does not belong in
- * device-tree.
- */
- lp->tx_chnl_ctrl = 0x10220000;
- lp->rx_chnl_ctrl = 0xff070000;
- lp->coalesce_count_rx = 0x07;
-
/* Finished with the DMA node; drop the reference */
of_node_put(dma_np);
} else if (pdata) {
@@ -1435,18 +1540,13 @@ static int temac_probe(struct platform_device *pdev)
lp->tx_irq = platform_get_irq(pdev, 1);
/* IRQ delay/coalescing setup */
- if (pdata->tx_irq_timeout || pdata->tx_irq_count)
- lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
- (pdata->tx_irq_count << 16);
- else
- lp->tx_chnl_ctrl = 0x10220000;
+ if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
+ lp->coalesce_delay_tx = pdata->tx_irq_timeout;
+ lp->coalesce_count_tx = pdata->tx_irq_count;
+ }
if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
- lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
- (pdata->rx_irq_count << 16);
+ lp->coalesce_delay_rx = pdata->rx_irq_timeout;
lp->coalesce_count_rx = pdata->rx_irq_count;
- } else {
- lp->rx_chnl_ctrl = 0xff070000;
- lp->coalesce_count_rx = 0x07;
}
}
diff --git a/drivers/net/fddi/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c
index 9c8aa3a95463..cc9ac572423e 100644
--- a/drivers/net/fddi/skfp/drvfbi.c
+++ b/drivers/net/fddi/skfp/drvfbi.c
@@ -20,7 +20,7 @@
#include "h/supern_2.h"
#include "h/skfbiinc.h"
#include <linux/bitrev.h>
-#include <linux/pci_regs.h>
+#include <linux/pci.h>
#ifndef lint
static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ;
@@ -112,7 +112,7 @@ static void card_start(struct s_smc *smc)
*/
outp(ADDR(B0_TST_CTRL), TST_CFG_WRITE_ON) ; /* enable for writes */
word = inpw(PCI_C(PCI_STATUS)) ;
- outpw(PCI_C(PCI_STATUS), word | PCI_ERRBITS) ;
+ outpw(PCI_C(PCI_STATUS), word | PCI_STATUS_ERROR_BITS);
outp(ADDR(B0_TST_CTRL), TST_CFG_WRITE_OFF) ; /* disable writes */
/*
diff --git a/drivers/net/fddi/skfp/h/skfbi.h b/drivers/net/fddi/skfp/h/skfbi.h
index 480795681719..ccee00b71dbc 100644
--- a/drivers/net/fddi/skfp/h/skfbi.h
+++ b/drivers/net/fddi/skfp/h/skfbi.h
@@ -33,11 +33,6 @@
*/
#define I2C_ADDR_VPD 0xA0 /* I2C address for the VPD EEPROM */
-
-#define PCI_ERRBITS (PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_PARITY)
-
-
-
/*
* Control Register File:
* Bank 0
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 245ce2374035..d8e86bdbfba1 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1140,23 +1140,6 @@ out:
return ret;
}
-static bool
-netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
-{
- struct ethtool_link_ksettings diff1 = *cmd;
- struct ethtool_link_ksettings diff2 = {};
-
- diff1.base.speed = 0;
- diff1.base.duplex = 0;
- /* advertising and cmd are usually set */
- ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
- diff1.base.cmd = 0;
- /* We set port to PORT_OTHER */
- diff2.base.port = PORT_OTHER;
-
- return !memcmp(&diff1, &diff2, sizeof(diff1));
-}
-
static void netvsc_init_settings(struct net_device *dev)
{
struct net_device_context *ndc = netdev_priv(dev);
@@ -1173,6 +1156,12 @@ static int netvsc_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
+ struct net_device *vf_netdev;
+
+ vf_netdev = rtnl_dereference(ndc->vf_netdev);
+
+ if (vf_netdev)
+ return __ethtool_get_link_ksettings(vf_netdev, cmd);
cmd->base.speed = ndc->speed;
cmd->base.duplex = ndc->duplex;
@@ -1185,18 +1174,18 @@ static int netvsc_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
- u32 speed;
+ struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
- speed = cmd->base.speed;
- if (!ethtool_validate_speed(speed) ||
- !ethtool_validate_duplex(cmd->base.duplex) ||
- !netvsc_validate_ethtool_ss_cmd(cmd))
- return -EINVAL;
+ if (vf_netdev) {
+ if (!vf_netdev->ethtool_ops->set_link_ksettings)
+ return -EOPNOTSUPP;
- ndc->speed = speed;
- ndc->duplex = cmd->base.duplex;
+ return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
+ cmd);
+ }
- return 0;
+ return ethtool_virtdev_set_link_ksettings(dev, cmd,
+ &ndc->speed, &ndc->duplex);
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
new file mode 100644
index 000000000000..b8cb7cadbf75
--- /dev/null
+++ b/drivers/net/ipa/Kconfig
@@ -0,0 +1,19 @@
+config QCOM_IPA
+ tristate "Qualcomm IPA support"
+ depends on ARCH_QCOM && 64BIT && NET
+ select QCOM_QMI_HELPERS
+ select QCOM_MDT_LOADER
+ default QCOM_Q6V5_COMMON
+ help
+ Choose Y or M here to include support for the Qualcomm
+ IP Accelerator (IPA), a hardware block present in some
+ Qualcomm SoCs. The IPA is a programmable protocol processor
+ that is capable of generic hardware handling of IP packets,
+ including routing, filtering, and NAT. Currently the IPA
+ driver supports only basic transport of network traffic
+ between the AP and modem, on the Qualcomm SDM845 SoC.
+
+ Note that if selected, the selection type must match that
+ of QCOM_Q6V5_COMMON (Y or M).
+
+ If unsure, say N.
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
new file mode 100644
index 000000000000..afe5df1e6eee
--- /dev/null
+++ b/drivers/net/ipa/Makefile
@@ -0,0 +1,12 @@
+# Un-comment the next line if you want to validate configuration data
+#ccflags-y += -DIPA_VALIDATE
+
+obj-$(CONFIG_QCOM_IPA) += ipa.o
+
+ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
+ ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \
+ ipa_gsi.o ipa_smp2p.o ipa_uc.o \
+ ipa_endpoint.o ipa_cmd.o ipa_modem.o \
+ ipa_qmi.o ipa_qmi_msg.o
+
+ipa-y += ipa_data-sdm845.o ipa_data-sc7180.o
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
new file mode 100644
index 000000000000..845478a19a4f
--- /dev/null
+++ b/drivers/net/ipa/gsi.c
@@ -0,0 +1,2055 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "gsi.h"
+#include "gsi_reg.h"
+#include "gsi_private.h"
+#include "gsi_trans.h"
+#include "ipa_gsi.h"
+#include "ipa_data.h"
+
+/**
+ * DOC: The IPA Generic Software Interface
+ *
+ * The generic software interface (GSI) is an integral component of the IPA,
+ * providing a well-defined communication layer between the AP subsystem
+ * and the IPA core. The modem uses the GSI layer as well.
+ *
+ * -------- ---------
+ * | | | |
+ * | AP +<---. .----+ Modem |
+ * | +--. | | .->+ |
+ * | | | | | | | |
+ * -------- | | | | ---------
+ * v | v |
+ * --+-+---+-+--
+ * | GSI |
+ * |-----------|
+ * | |
+ * | IPA |
+ * | |
+ * -------------
+ *
+ * In the above diagram, the AP and Modem represent "execution environments"
+ * (EEs), which are independent operating environments that use the IPA for
+ * data transfer.
+ *
+ * Each EE uses a set of unidirectional GSI "channels," which allow transfer
+ * of data to or from the IPA. A channel is implemented as a ring buffer,
+ * with a DRAM-resident array of "transfer elements" (TREs) available to
+ * describe transfers to or from other EEs through the IPA. A transfer
+ * element can also contain an immediate command, requesting the IPA perform
+ * actions other than data transfer.
+ *
+ * Each TRE refers to a block of data, also located in DRAM. After writing one
+ * or more TREs to a channel, the writer (either the IPA or an EE) writes a
+ * doorbell register to inform the receiving side how many elements have
+ * been written.
+ *
+ * Each channel has a GSI "event ring" associated with it. An event ring
+ * is implemented very much like a channel ring, but is always directed from
+ * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
+ * events by adding an entry to the event ring associated with the channel.
+ * The GSI then writes its doorbell for the event ring, causing the target
+ * EE to be interrupted. Each entry in an event ring contains a pointer
+ * to the channel TRE whose completion the event represents.
+ *
+ * Each TRE in a channel ring has a set of flags. One flag indicates whether
+ * the completion of the transfer operation generates an entry (and possibly
+ * an interrupt) in the channel's event ring. Other flags allow transfer
+ * elements to be chained together, forming a single logical transaction.
+ * TRE flags are used to control whether and when interrupts are generated
+ * to signal completion of channel transfers.
+ *
+ * Elements in channel and event rings are completed (or consumed) strictly
+ * in order. Completion of one entry implies the completion of all preceding
+ * entries. A single completion interrupt can therefore communicate the
+ * completion of many transfers.
+ *
+ * Note that all GSI registers are little-endian, which is the assumed
+ * endianness of I/O space accesses. The accessor functions perform byte
+ * swapping if needed (i.e., for a big endian CPU).
+ */
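As a concrete illustration of the ring/doorbell handshake described above, here is a toy producer model (sizes and field names are illustrative only): entries are filled first, then a single doorbell write publishes them all at once.

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8     /* power of 2, like a GSI ring */

    struct tre { uint64_t addr; uint16_t len; };

    static struct tre ring[RING_SIZE];
    static uint32_t wr_index;       /* producer's next free slot */
    static uint32_t doorbell;       /* last published index */

    static void queue_tre(uint64_t addr, uint16_t len)
    {
            ring[wr_index % RING_SIZE] = (struct tre){ addr, len };
            wr_index++;
    }

    int main(void)
    {
            queue_tre(0x1000, 64);
            queue_tre(0x2000, 128);
            doorbell = wr_index;    /* one write publishes both TREs */
            printf("published %u TREs\n", doorbell);
            return 0;
    }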
+
+/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
+#define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
+
+#define GSI_CMD_TIMEOUT 5 /* seconds */
+
+#define GSI_CHANNEL_STOP_RX_RETRIES 10
+
+#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
+#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
+
+#define GSI_ISR_MAX_ITER 50 /* Detect interrupt storms */
+
+/* An entry in an event ring */
+struct gsi_event {
+ __le64 xfer_ptr;
+ __le16 len;
+ u8 reserved1;
+ u8 code;
+ __le16 reserved2;
+ u8 type;
+ u8 chid;
+};
+
+/* Hardware values from the error log register error code field */
+enum gsi_err_code {
+ GSI_INVALID_TRE_ERR = 0x1,
+ GSI_OUT_OF_BUFFERS_ERR = 0x2,
+ GSI_OUT_OF_RESOURCES_ERR = 0x3,
+ GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
+ GSI_EVT_RING_EMPTY_ERR = 0x5,
+ GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
+ GSI_HWO_1_ERR = 0x8,
+};
+
+/* Hardware values from the error log register error type field */
+enum gsi_err_type {
+ GSI_ERR_TYPE_GLOB = 0x1,
+ GSI_ERR_TYPE_CHAN = 0x2,
+ GSI_ERR_TYPE_EVT = 0x3,
+};
+
+/* Hardware values used when programming an event ring */
+enum gsi_evt_chtype {
+ GSI_EVT_CHTYPE_MHI_EV = 0x0,
+ GSI_EVT_CHTYPE_XHCI_EV = 0x1,
+ GSI_EVT_CHTYPE_GPI_EV = 0x2,
+ GSI_EVT_CHTYPE_XDCI_EV = 0x3,
+};
+
+/* Hardware values used when programming a channel */
+enum gsi_channel_protocol {
+ GSI_CHANNEL_PROTOCOL_MHI = 0x0,
+ GSI_CHANNEL_PROTOCOL_XHCI = 0x1,
+ GSI_CHANNEL_PROTOCOL_GPI = 0x2,
+ GSI_CHANNEL_PROTOCOL_XDCI = 0x3,
+};
+
+/* Hardware values representing an event ring immediate command opcode */
+enum gsi_evt_cmd_opcode {
+ GSI_EVT_ALLOCATE = 0x0,
+ GSI_EVT_RESET = 0x9,
+ GSI_EVT_DE_ALLOC = 0xa,
+};
+
+/* Hardware values representing a generic immediate command opcode */
+enum gsi_generic_cmd_opcode {
+ GSI_GENERIC_HALT_CHANNEL = 0x1,
+ GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
+};
+
+/* Hardware values representing a channel immediate command opcode */
+enum gsi_ch_cmd_opcode {
+ GSI_CH_ALLOCATE = 0x0,
+ GSI_CH_START = 0x1,
+ GSI_CH_STOP = 0x2,
+ GSI_CH_RESET = 0x9,
+ GSI_CH_DE_ALLOC = 0xa,
+};
+
+/** gsi_channel_scratch_gpi - GPI protocol scratch register
+ * @max_outstanding_tre:
+ * Defines the maximum number of TREs allowed in a single transaction
+ * on a channel (in bytes). This determines the amount of prefetch
+ * performed by the hardware. We configure this to equal the size of
+ * the TLV FIFO for the channel.
+ * @outstanding_threshold:
+ * Defines the threshold (in bytes) determining when the sequencer
+ * should update the channel doorbell. We configure this to equal
+ * the size of two TREs.
+ */
+struct gsi_channel_scratch_gpi {
+ u64 reserved1;
+ u16 reserved2;
+ u16 max_outstanding_tre;
+ u16 reserved3;
+ u16 outstanding_threshold;
+};
+
+/** gsi_channel_scratch - channel scratch configuration area
+ *
+ * The exact interpretation of this register is protocol-specific.
+ * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
+ */
+union gsi_channel_scratch {
+ struct gsi_channel_scratch_gpi gpi;
+ struct {
+ u32 word1;
+ u32 word2;
+ u32 word3;
+ u32 word4;
+ } data;
+};
+
+/* Check things that can be validated at build time. */
+static void gsi_validate_build(void)
+{
+ /* This is used as a divisor */
+ BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);
+
+ /* Code assumes the size of channel and event ring element are
+ * the same (and fixed). Make sure the size of an event ring
+ * element is what's expected.
+ */
+ BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);
+
+ /* Hardware requires a 2^n ring size. We ensure the number of
+ * elements in an event ring is a power of 2 elsewhere; this
+ * ensures the elements themselves meet the requirement.
+ */
+ BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
+
+ /* The channel element size must fit in this field */
+ BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));
+
+ /* The event ring element size must fit in this field */
+ BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
+}
+
+/* Return the channel id associated with a given channel */
+static u32 gsi_channel_id(struct gsi_channel *channel)
+{
+ return channel - &channel->gsi->channel[0];
+}
+
+static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val;
+
+ gsi->event_enable_bitmap |= BIT(evt_ring_id);
+ val = gsi->event_enable_bitmap;
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+}
+
+static void gsi_isr_ieob_clear(struct gsi *gsi, u32 mask)
+{
+ iowrite32(mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
+}
+
+static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val;
+
+ gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
+ val = gsi->event_enable_bitmap;
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+}
+
+/* Enable all GSI interrupt types */
+static void gsi_irq_enable(struct gsi *gsi)
+{
+ u32 val;
+
+ /* We don't use inter-EE channel or event interrupts */
+ val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
+ val &= ~MSK_INTER_EE_CH_CTRL_FMASK;
+ val &= ~MSK_INTER_EE_EV_CTRL_FMASK;
+ iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+
+ val = GENMASK(gsi->channel_count - 1, 0);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+
+ val = GENMASK(gsi->evt_ring_count - 1, 0);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+
+ /* Each IEOB interrupt is enabled (later) as needed by channels */
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+ val = GSI_CNTXT_GLOB_IRQ_ALL;
+ iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
+ /* Never enable GSI_BREAK_POINT */
+ val = GSI_CNTXT_GSI_IRQ_ALL & ~EN_BREAK_POINT_FMASK;
+ iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+}
+
+/* Disable all GSI interrupt types */
+static void gsi_irq_disable(struct gsi *gsi)
+{
+ iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+}
+
+/* Return the virtual address associated with a ring index */
+void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
+{
+ /* Note: index *must* be used modulo the ring count here */
+ return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
+}
+
+/* Return the 32-bit DMA address associated with a ring index */
+static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
+{
+ return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
+}
+
+/* Return the ring index of a 32-bit ring offset */
+static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
+{
+ return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
+}
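These two helpers are inverses over the low 32 bits of the ring's DMA address; a quick stand-alone check, assuming a 16-byte ring element size:

    #include <assert.h>
    #include <stdint.h>

    #define ELEM_SIZE 16u   /* assumed GSI_RING_ELEMENT_SIZE */

    static uint32_t ring_addr(uint32_t base, uint32_t index)
    {
            return base + index * ELEM_SIZE;
    }

    static uint32_t ring_index(uint32_t base, uint32_t offset)
    {
            return (offset - base) / ELEM_SIZE;
    }

    int main(void)
    {
            uint32_t base = 0x1000;

            /* index -> address -> index round-trips exactly */
            assert(ring_index(base, ring_addr(base, 5)) == 5);
            return 0;
    }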
+
+/* Issue a GSI command by writing a value to a register, then wait for
+ * completion to be signaled. Returns true if the command completes
+ * or false if it times out.
+ */
+static bool
+gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
+{
+ reinit_completion(completion);
+
+ iowrite32(val, gsi->virt + reg);
+
+ return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
+}
+
+/* Return the hardware's notion of the current state of an event ring */
+static enum gsi_evt_ring_state
+gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+
+ return u32_get_bits(val, EV_CHSTATE_FMASK);
+}
+
+/* Issue an event ring command and wait for it to complete */
+static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ struct completion *completion = &evt_ring->completion;
+ u32 val;
+
+ val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
+ val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
+
+ if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
+ return 0; /* Success! */
+
+ dev_err(gsi->dev, "GSI command %u to event ring %u timed out "
+ "(state is %u)\n", opcode, evt_ring_id, evt_ring->state);
+
+ return -ETIMEDOUT;
+}
+
+/* Allocate an event ring in NOT_ALLOCATED state */
+static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ int ret;
+
+ /* Get initial event ring state */
+ evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
+
+ if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ return -EINVAL;
+
+ ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
+ if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad event ring state (%u) after alloc\n",
+ evt_ring->state);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/* Reset a GSI event ring in ALLOCATED or ERROR state. */
+static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ enum gsi_evt_ring_state state = evt_ring->state;
+ int ret;
+
+ if (state != GSI_EVT_RING_STATE_ALLOCATED &&
+ state != GSI_EVT_RING_STATE_ERROR) {
+ dev_err(gsi->dev, "bad event ring state (%u) before reset\n",
+ evt_ring->state);
+ return;
+ }
+
+ ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+ if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
+ dev_err(gsi->dev, "bad event ring state (%u) after reset\n",
+ evt_ring->state);
+}
+
+/* Issue a hardware de-allocation request for an allocated event ring */
+static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ int ret;
+
+ if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n",
+ evt_ring->state);
+ return;
+ }
+
+ ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+ if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n",
+ evt_ring->state);
+}
+
+/* Return the hardware's notion of the current state of a channel */
+static enum gsi_channel_state
+gsi_channel_state(struct gsi *gsi, u32 channel_id)
+{
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+
+ return u32_get_bits(val, CHSTATE_FMASK);
+}
+
+/* Issue a channel command and wait for it to complete */
+static int
+gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
+{
+ struct completion *completion = &channel->completion;
+ u32 channel_id = gsi_channel_id(channel);
+ u32 val;
+
+ val = u32_encode_bits(channel_id, CH_CHID_FMASK);
+ val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
+
+ if (gsi_command(channel->gsi, GSI_CH_CMD_OFFSET, val, completion))
+ return 0; /* Success! */
+
+ dev_err(channel->gsi->dev, "GSI command %u to channel %u timed out "
+ "(state is %u)\n", opcode, channel_id, channel->state);
+
+ return -ETIMEDOUT;
+}
+
+/* Allocate GSI channel in NOT_ALLOCATED state */
+static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ /* Get initial channel state */
+ channel->state = gsi_channel_state(gsi, channel_id);
+
+ if (channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ return -EINVAL;
+
+ ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
+ channel->state);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/* Start an ALLOCATED channel */
+static int gsi_channel_start_command(struct gsi_channel *channel)
+{
+ enum gsi_channel_state state = channel->state;
+ int ret;
+
+ if (state != GSI_CHANNEL_STATE_ALLOCATED &&
+ state != GSI_CHANNEL_STATE_STOPPED)
+ return -EINVAL;
+
+ ret = gsi_channel_command(channel, GSI_CH_START);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_STARTED) {
+ dev_err(channel->gsi->dev,
+ "bad channel state (%u) after start\n",
+ channel->state);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/* Stop a GSI channel in STARTED state */
+static int gsi_channel_stop_command(struct gsi_channel *channel)
+{
+ enum gsi_channel_state state = channel->state;
+ int ret;
+
+ if (state != GSI_CHANNEL_STATE_STARTED &&
+ state != GSI_CHANNEL_STATE_STOP_IN_PROC)
+ return -EINVAL;
+
+ ret = gsi_channel_command(channel, GSI_CH_STOP);
+ if (ret || channel->state == GSI_CHANNEL_STATE_STOPPED)
+ return ret;
+
+ /* We may have to try again if stop is in progress */
+ if (channel->state == GSI_CHANNEL_STATE_STOP_IN_PROC)
+ return -EAGAIN;
+
+ dev_err(channel->gsi->dev, "bad channel state (%u) after stop\n",
+ channel->state);
+
+ return -EIO;
+}
+
+/* Reset a GSI channel in ALLOCATED or ERROR state. */
+static void gsi_channel_reset_command(struct gsi_channel *channel)
+{
+ int ret;
+
+ msleep(1); /* A short delay is required before a RESET command */
+
+ if (channel->state != GSI_CHANNEL_STATE_STOPPED &&
+ channel->state != GSI_CHANNEL_STATE_ERROR) {
+ dev_err(channel->gsi->dev,
+ "bad channel state (%u) before reset\n",
+ channel->state);
+ return;
+ }
+
+ ret = gsi_channel_command(channel, GSI_CH_RESET);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED)
+ dev_err(channel->gsi->dev,
+ "bad channel state (%u) after reset\n",
+ channel->state);
+}
+
+/* Deallocate an ALLOCATED GSI channel */
+static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ if (channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad channel state (%u) before dealloc\n",
+ channel->state);
+ return;
+ }
+
+ ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ dev_err(gsi->dev, "bad channel state (%u) after dealloc\n",
+ channel->state);
+}
+
+/* Ring an event ring doorbell, reporting the last entry processed by the AP.
+ * The index argument (modulo the ring count) is the first unfilled entry, so
+ * we supply one less than that with the doorbell. Update the event ring
+ * index field with the value provided.
+ */
+static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
+{
+ struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
+ u32 val;
+
+ ring->index = index; /* Next unused entry */
+
+ /* Note: index *must* be used modulo the ring count here */
+ val = gsi_ring_addr(ring, (index - 1) % ring->count);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
+}
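The (index - 1) % ring->count above relies on unsigned wraparound: ringing the doorbell with index 0 correctly reports the final slot of the ring. In isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t count = 16;    /* GSI rings are power-of-2 sized */
            uint32_t index = 0;     /* next unused entry */

            /* (0 - 1) wraps to 0xffffffff; % 16 yields 15, the last slot */
            printf("%u\n", (index - 1) % count);
            return 0;
    }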
+
+/* Program an event ring for use */
+static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
+ u32 val;
+
+ val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
+ val |= EV_INTYPE_FMASK;
+ val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+
+ val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
+
+ /* The context 2 and 3 registers store the low-order and
+ * high-order 32 bits of the address of the event ring,
+ * respectively.
+ */
+ val = evt_ring->ring.addr & GENMASK(31, 0);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
+
+ val = evt_ring->ring.addr >> 32;
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
+
+ /* Enable interrupt moderation by setting the moderation delay */
+ val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
+ val |= u32_encode_bits(1, MODC_FMASK); /* comes from channel */
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));
+
+ /* No MSI write data, and MSI address high and low address is 0 */
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));
+
+ /* We don't need to get event read pointer updates */
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
+
+ /* Finally, tell the hardware we've completed event 0 (arbitrary) */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
+}
+
+/* Return the last (most recent) transaction completed on a channel. */
+static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct gsi_trans *trans;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ if (!list_empty(&trans_info->complete))
+ trans = list_last_entry(&trans_info->complete,
+ struct gsi_trans, links);
+ else if (!list_empty(&trans_info->polled))
+ trans = list_last_entry(&trans_info->polled,
+ struct gsi_trans, links);
+ else
+ trans = NULL;
+
+ /* Caller will wait for this, so take a reference */
+ if (trans)
+ refcount_inc(&trans->refcount);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ return trans;
+}
+
+/* Wait for transaction activity on a channel to complete */
+static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
+{
+ struct gsi_trans *trans;
+
+ /* Get the last transaction, and wait for it to complete */
+ trans = gsi_channel_trans_last(channel);
+ if (trans) {
+ wait_for_completion(&trans->completion);
+ gsi_trans_free(trans);
+ }
+}
+
+/* Stop channel activity. Transactions may not be allocated until thawed. */
+static void gsi_channel_freeze(struct gsi_channel *channel)
+{
+ gsi_channel_trans_quiesce(channel);
+
+ napi_disable(&channel->napi);
+
+ gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
+}
+
+/* Allow transactions to be used on the channel again. */
+static void gsi_channel_thaw(struct gsi_channel *channel)
+{
+ gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
+
+ napi_enable(&channel->napi);
+}
+
+/* Program a channel for use */
+static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
+{
+ size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
+ u32 channel_id = gsi_channel_id(channel);
+ union gsi_channel_scratch scr = { };
+ struct gsi_channel_scratch_gpi *gpi;
+ struct gsi *gsi = channel->gsi;
+ u32 wrr_weight = 0;
+ u32 val;
+
+ /* Arbitrarily pick TRE 0 as the first channel element to use */
+ channel->tre_ring.index = 0;
+
+ /* We program all channels to use GPI protocol */
+ val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
+ if (channel->toward_ipa)
+ val |= CHTYPE_DIR_FMASK;
+ val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
+ val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+
+ val = u32_encode_bits(size, R_LENGTH_FMASK);
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
+
+ /* The context 2 and 3 registers store the low-order and
+ * high-order 32 bits of the address of the channel ring,
+ * respectively.
+ */
+ val = channel->tre_ring.addr & GENMASK(31, 0);
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
+
+ val = channel->tre_ring.addr >> 32;
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
+
+ /* Command channel gets low weighted round-robin priority */
+ if (channel->command)
+ wrr_weight = field_max(WRR_WEIGHT_FMASK);
+ val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);
+
+ /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
+
+ /* Enable the doorbell engine if requested */
+ if (doorbell)
+ val |= USE_DB_ENG_FMASK;
+
+ if (!channel->use_prefetch)
+ val |= USE_ESCAPE_BUF_ONLY_FMASK;
+
+ iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
+
+ /* Now update the scratch registers for GPI protocol */
+ gpi = &scr.gpi;
+ gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
+ GSI_RING_ELEMENT_SIZE;
+ gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
+
+ val = scr.data.word1;
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));
+
+ val = scr.data.word2;
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));
+
+ val = scr.data.word3;
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));
+
+ /* We must preserve the upper 16 bits of the last scratch register.
+ * The next sequence assumes those bits remain unchanged between the
+ * read and the write.
+ */
+ val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+ val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+
+ /* All done! */
+}
+
+static void gsi_channel_deprogram(struct gsi_channel *channel)
+{
+ /* Nothing to do */
+}
+
+/* Start an allocated GSI channel */
+int gsi_channel_start(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 evt_ring_id = channel->evt_ring_id;
+ int ret;
+
+ mutex_lock(&gsi->mutex);
+
+ ret = gsi_channel_start_command(channel);
+
+ mutex_unlock(&gsi->mutex);
+
+ /* Clear the channel's event ring interrupt in case it's pending */
+ gsi_isr_ieob_clear(gsi, BIT(evt_ring_id));
+
+ gsi_channel_thaw(channel);
+
+ return ret;
+}
+
+/* Stop a started channel */
+int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 retries;
+ int ret;
+
+ gsi_channel_freeze(channel);
+
+ /* Channel could have entered STOPPED state since last call if the
+ * STOP command timed out. We won't stop a channel if stopping it
+ * was successful previously (so we still want the freeze above).
+ */
+ if (channel->state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
+
+ /* RX channels might require a little time to enter STOPPED state */
+ retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
+
+ mutex_lock(&gsi->mutex);
+
+ do {
+ ret = gsi_channel_stop_command(channel);
+ if (ret != -EAGAIN)
+ break;
+ msleep(1);
+ } while (retries--);
+
+ mutex_unlock(&gsi->mutex);
+
+ /* Thaw the channel if we need to retry (or on error) */
+ if (ret)
+ gsi_channel_thaw(channel);
+
+ return ret;
+}
+
+/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ mutex_lock(&gsi->mutex);
+
+ /* Due to a hardware quirk we need to reset RX channels twice. */
+ gsi_channel_reset_command(channel);
+ if (!channel->toward_ipa)
+ gsi_channel_reset_command(channel);
+
+ gsi_channel_program(channel, db_enable);
+ gsi_channel_trans_cancel_pending(channel);
+
+ mutex_unlock(&gsi->mutex);
+}
+
+/* Stop a STARTED channel for suspend (using stop if requested) */
+int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ if (stop)
+ return gsi_channel_stop(gsi, channel_id);
+
+ gsi_channel_freeze(channel);
+
+ return 0;
+}
+
+/* Resume a suspended channel (starting will be requested if STOPPED) */
+int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ if (start)
+ return gsi_channel_start(gsi, channel_id);
+
+ gsi_channel_thaw(channel);
+
+ return 0;
+}
+
+/**
+ * gsi_channel_tx_queued() - Report queued TX transfers for a channel
+ * @channel: Channel for which to report
+ *
+ * Report to the network stack the number of bytes and transactions that
+ * have been queued to hardware since the last call. This and the next function
+ * supply information used by the network stack for throttling.
+ *
+ * For each channel we track the number of transactions used and bytes of
+ * data those transactions represent. We also track what those values are
+ * each time this function is called. Subtracting the two tells us
+ * the number of bytes and transactions that have been added between
+ * successive calls.
+ *
+ * Calling this each time we ring the channel doorbell allows us to
+ * provide accurate information to the network stack about how much
+ * work we've given the hardware at any point in time.
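+ *
+ * For example, if 10 transactions totaling 4000 bytes have been queued
+ * since the channel started and the previous call recorded 7 transactions
+ * and 2500 bytes, this call reports 3 transactions and 1500 bytes.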
+ */
+void gsi_channel_tx_queued(struct gsi_channel *channel)
+{
+ u32 trans_count;
+ u32 byte_count;
+
+ byte_count = channel->byte_count - channel->queued_byte_count;
+ trans_count = channel->trans_count - channel->queued_trans_count;
+ channel->queued_byte_count = channel->byte_count;
+ channel->queued_trans_count = channel->trans_count;
+
+ ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
+ trans_count, byte_count);
+}
+
+/**
+ * gsi_channel_tx_update() - Report completed TX transfers
+ * @channel: Channel that has completed transmitting packets
+ * @trans:	Last transaction known to be complete
+ *
+ * Compute the number of transactions and bytes that have been transferred
+ * over a TX channel since the given transaction was committed. Report this
+ * information to the network stack.
+ *
+ * At the time a transaction is committed, we record its channel's
+ * committed transaction and byte counts *in the transaction*.
+ * Completions are signaled by the hardware with an interrupt, and
+ * we can determine the latest completed transaction at that time.
+ *
+ * The difference between the byte/transaction count recorded in
+ * the transaction and the count last time we recorded a completion
+ * tells us exactly how much data has been transferred between
+ * completions.
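+ *
+ * For example, if the totals recorded through the given transaction are
+ * 10 transactions and 4000 bytes, and the previous completion recorded
+ * 7 and 2500, then 3 transactions and 1500 bytes have completed in the
+ * interim.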
+ *
+ * Calling this each time we learn of a newly-completed transaction
+ * allows us to provide accurate information to the network stack
+ * about how much work has been completed by the hardware at a given
+ * point in time.
+ */
+static void
+gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
+{
+ u64 byte_count = trans->byte_count + trans->len;
+ u64 trans_count = trans->trans_count + 1;
+
+ byte_count -= channel->compl_byte_count;
+ channel->compl_byte_count += byte_count;
+ trans_count -= channel->compl_trans_count;
+ channel->compl_trans_count += trans_count;
+
+ ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
+ trans_count, byte_count);
+}
+
+/* Channel control interrupt handler */
+static void gsi_isr_chan_ctrl(struct gsi *gsi)
+{
+ u32 channel_mask;
+
+ channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
+ iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
+ while (channel_mask) {
+ u32 channel_id = __ffs(channel_mask);
+ struct gsi_channel *channel;
+
+ channel_mask ^= BIT(channel_id);
+
+ channel = &gsi->channel[channel_id];
+ channel->state = gsi_channel_state(gsi, channel_id);
+
+ complete(&channel->completion);
+ }
+}
+
+/* Event ring control interrupt handler */
+static void gsi_isr_evt_ctrl(struct gsi *gsi)
+{
+ u32 event_mask;
+
+ event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
+ iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
+ while (event_mask) {
+ u32 evt_ring_id = __ffs(event_mask);
+ struct gsi_evt_ring *evt_ring;
+
+ event_mask ^= BIT(evt_ring_id);
+
+ evt_ring = &gsi->evt_ring[evt_ring_id];
+ evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
+
+ complete(&evt_ring->completion);
+ }
+}
+
+/* Global channel error interrupt handler */
+static void
+gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
+{
+ if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
+ complete(&gsi->channel[channel_id].completion);
+ return;
+ }
+
+ /* Report, but otherwise ignore all other error codes */
+ dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
+ channel_id, err_ee, code);
+}
+
+/* Global event error interrupt handler */
+static void
+gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
+{
+ if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ u32 channel_id = gsi_channel_id(evt_ring->channel);
+
+ complete(&evt_ring->completion);
+ dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
+ channel_id);
+ return;
+ }
+
+ /* Report, but otherwise ignore all other error codes */
+ dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
+ evt_ring_id, err_ee, code);
+}
+
+/* Global error interrupt handler */
+static void gsi_isr_glob_err(struct gsi *gsi)
+{
+ enum gsi_err_type type;
+ enum gsi_err_code code;
+ u32 which;
+ u32 val;
+ u32 ee;
+
+ /* Get the logged error, then reinitialize the log */
+ val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
+ iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
+ iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
+
+ ee = u32_get_bits(val, ERR_EE_FMASK);
+ which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
+ type = u32_get_bits(val, ERR_TYPE_FMASK);
+ code = u32_get_bits(val, ERR_CODE_FMASK);
+
+ if (type == GSI_ERR_TYPE_CHAN)
+ gsi_isr_glob_chan_err(gsi, ee, which, code);
+ else if (type == GSI_ERR_TYPE_EVT)
+ gsi_isr_glob_evt_err(gsi, ee, which, code);
+ else /* type GSI_ERR_TYPE_GLOB should be fatal */
+ dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
+}
+
+/* Generic EE interrupt handler */
+static void gsi_isr_gp_int1(struct gsi *gsi)
+{
+ u32 result;
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+ result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
+ if (result != GENERIC_EE_SUCCESS_FVAL)
+ dev_err(gsi->dev, "global INT1 generic result %u\n", result);
+
+ complete(&gsi->completion);
+}
+
+/* Global interrupt handler */
+static void gsi_isr_glob_ee(struct gsi *gsi)
+{
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
+
+ if (val & ERROR_INT_FMASK)
+ gsi_isr_glob_err(gsi);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
+
+ val &= ~ERROR_INT_FMASK;
+
+ if (val & EN_GP_INT1_FMASK) {
+ val ^= EN_GP_INT1_FMASK;
+ gsi_isr_gp_int1(gsi);
+ }
+
+ if (val)
+ dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
+}
+
+/* I/O completion interrupt (IEOB) handler */
+static void gsi_isr_ieob(struct gsi *gsi)
+{
+ u32 event_mask;
+
+ event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
+ gsi_isr_ieob_clear(gsi, event_mask);
+
+ while (event_mask) {
+ u32 evt_ring_id = __ffs(event_mask);
+
+ event_mask ^= BIT(evt_ring_id);
+
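+		/* NAPI polling re-enables this interrupt when it completes */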
+ gsi_irq_ieob_disable(gsi, evt_ring_id);
+ napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
+ }
+}
+
+/* General event interrupts represent serious problems, so report them */
+static void gsi_isr_general(struct gsi *gsi)
+{
+ struct device *dev = gsi->dev;
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
+ iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
+
+ if (val)
+ dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
+}
+
+/**
+ * gsi_isr() - Top level GSI interrupt service routine
+ * @irq: Interrupt number (ignored)
+ * @dev_id: GSI pointer supplied to request_irq()
+ *
+ * This is the main handler function registered for the GSI IRQ. Each type
+ * of interrupt has a separate handler function that is called from here.
+ */
+static irqreturn_t gsi_isr(int irq, void *dev_id)
+{
+ struct gsi *gsi = dev_id;
+ u32 intr_mask;
+ u32 cnt = 0;
+
+ while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
+ /* intr_mask contains bitmask of pending GSI interrupts */
+ do {
+ u32 gsi_intr = BIT(__ffs(intr_mask));
+
+ intr_mask ^= gsi_intr;
+
+ switch (gsi_intr) {
+ case CH_CTRL_FMASK:
+ gsi_isr_chan_ctrl(gsi);
+ break;
+ case EV_CTRL_FMASK:
+ gsi_isr_evt_ctrl(gsi);
+ break;
+ case GLOB_EE_FMASK:
+ gsi_isr_glob_ee(gsi);
+ break;
+ case IEOB_FMASK:
+ gsi_isr_ieob(gsi);
+ break;
+ case GENERAL_FMASK:
+ gsi_isr_general(gsi);
+ break;
+ default:
+ dev_err(gsi->dev,
+ "%s: unrecognized type 0x%08x\n",
+ __func__, gsi_intr);
+ break;
+ }
+ } while (intr_mask);
+
+ if (++cnt > GSI_ISR_MAX_ITER) {
+ dev_err(gsi->dev, "interrupt flood\n");
+ break;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Return the transaction associated with a transfer completion event */
+static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
+ struct gsi_event *event)
+{
+ u32 tre_offset;
+ u32 tre_index;
+
+ /* Event xfer_ptr records the TRE it's associated with */
+ tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
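+	/* The low 32 bits of the TRE address suffice to locate it in the ring */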
+ tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
+
+ return gsi_channel_trans_mapped(channel, tre_index);
+}
+
+/**
+ * gsi_evt_ring_rx_update() - Record lengths of received data
+ * @evt_ring: Event ring associated with channel that received packets
+ * @index: Event index in ring reported by hardware
+ *
+ * Events for RX channels contain the actual number of bytes received into
+ * the buffer. Every event has a transaction associated with it, and here
+ * we update transactions to record their actual received lengths.
+ *
+ * This function is called whenever we learn that the GSI hardware has filled
+ * new events since the last time we checked. The ring's index field
+ * identifies the first entry in need of processing. The index provided is the
+ * first *unfilled* event in the ring (following the last filled one).
+ *
+ * Events are sequential within the event ring, and transactions are
+ * sequential within the transaction pool.
+ *
+ * Note that @index always refers to an element *within* the event ring.
+ */
+static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+{
+ struct gsi_channel *channel = evt_ring->channel;
+ struct gsi_ring *ring = &evt_ring->ring;
+ struct gsi_trans_info *trans_info;
+ struct gsi_event *event_done;
+ struct gsi_event *event;
+ struct gsi_trans *trans;
+ u32 byte_count = 0;
+ u32 old_index;
+ u32 event_avail;
+
+ trans_info = &channel->trans_info;
+
+ /* We'll start with the oldest un-processed event. RX channels
+ * replenish receive buffers in single-TRE transactions, so we
+ * can just map that event to its transaction. Transactions
+ * associated with completion events are consecutive.
+ */
+ old_index = ring->index;
+ event = gsi_ring_virt(ring, old_index);
+ trans = gsi_event_trans(channel, event);
+
+ /* Compute the number of events to process before we wrap,
+ * and determine when we'll be done processing events.
+ */
+ event_avail = ring->count - old_index % ring->count;
+ event_done = gsi_ring_virt(ring, index);
+ do {
+ trans->len = __le16_to_cpu(event->len);
+ byte_count += trans->len;
+
+ /* Move on to the next event and transaction */
+ if (--event_avail)
+ event++;
+ else
+ event = gsi_ring_virt(ring, 0);
+ trans = gsi_trans_pool_next(&trans_info->pool, trans);
+ } while (event != event_done);
+
+ /* We record RX bytes when they are received */
+ channel->byte_count += byte_count;
+ channel->trans_count++;
+}
+
+/* Initialize a ring, including allocating DMA memory for its entries */
+static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
+{
+ size_t size = count * GSI_RING_ELEMENT_SIZE;
+ struct device *dev = gsi->dev;
+ dma_addr_t addr;
+
+ /* Hardware requires a 2^n ring size, with alignment equal to size */
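+	/* (e.g. a 256-element ring occupies 4096 bytes, aligned on 4096) */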
+ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+ if (ring->virt && addr % size) {
+		dma_free_coherent(dev, size, ring->virt, addr);
+ dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
+ size);
+ return -EINVAL; /* Not a good error value, but distinct */
+ } else if (!ring->virt) {
+ return -ENOMEM;
+ }
+ ring->addr = addr;
+ ring->count = count;
+
+ return 0;
+}
+
+/* Free a previously-allocated ring */
+static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
+{
+ size_t size = ring->count * GSI_RING_ELEMENT_SIZE;
+
+ dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
+}
+
+/* Allocate an available event ring id */
+static int gsi_evt_ring_id_alloc(struct gsi *gsi)
+{
+ u32 evt_ring_id;
+
+ if (gsi->event_bitmap == ~0U) {
+ dev_err(gsi->dev, "event rings exhausted\n");
+ return -ENOSPC;
+ }
+
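+	/* ffz() finds the lowest clear bit, which is the first free ring id */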
+ evt_ring_id = ffz(gsi->event_bitmap);
+ gsi->event_bitmap |= BIT(evt_ring_id);
+
+ return (int)evt_ring_id;
+}
+
+/* Free a previously-allocated event ring id */
+static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
+{
+ gsi->event_bitmap &= ~BIT(evt_ring_id);
+}
+
+/* Ring a channel doorbell, reporting the first un-filled entry */
+void gsi_channel_doorbell(struct gsi_channel *channel)
+{
+ struct gsi_ring *tre_ring = &channel->tre_ring;
+ u32 channel_id = gsi_channel_id(channel);
+ struct gsi *gsi = channel->gsi;
+ u32 val;
+
+ /* Note: index *must* be used modulo the ring count here */
+ val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
+ iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
+}
+
+/* Consult hardware, move any newly completed transactions to completed list */
+static void gsi_channel_update(struct gsi_channel *channel)
+{
+ u32 evt_ring_id = channel->evt_ring_id;
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+ struct gsi_trans *trans;
+ struct gsi_ring *ring;
+ u32 offset;
+ u32 index;
+
+ evt_ring = &gsi->evt_ring[evt_ring_id];
+ ring = &evt_ring->ring;
+
+ /* See if there's anything new to process; if not, we're done. Note
+ * that index always refers to an entry *within* the event ring.
+ */
+ offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
+ index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
+ if (index == ring->index % ring->count)
+ return;
+
+ /* Get the transaction for the latest completed event. Take a
+ * reference to keep it from completing before we give the events
+ * for this and previous transactions back to the hardware.
+ */
+ trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
+ refcount_inc(&trans->refcount);
+
+ /* For RX channels, update each completed transaction with the number
+ * of bytes that were actually received. For TX channels, report
+ * the number of transactions and bytes this completion represents
+ * up the network stack.
+ */
+ if (channel->toward_ipa)
+ gsi_channel_tx_update(channel, trans);
+ else
+ gsi_evt_ring_rx_update(evt_ring, index);
+
+ gsi_trans_move_complete(trans);
+
+ /* Tell the hardware we've handled these events */
+ gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
+
+ gsi_trans_free(trans);
+}
+
+/**
+ * gsi_channel_poll_one() - Return a single completed transaction on a channel
+ * @channel: Channel to be polled
+ *
+ * @Return: Transaction pointer, or null if none are available
+ *
+ * This function returns the first entry on a channel's completed transaction
+ * list. If that list is empty, the hardware is consulted to determine
+ * whether any new transactions have completed. If so, they're moved to the
+ * completed list and the new first entry is returned. If there are no more
+ * completed transactions, a null pointer is returned.
+ */
+static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
+{
+ struct gsi_trans *trans;
+
+ /* Get the first transaction from the completed list */
+ trans = gsi_channel_trans_complete(channel);
+ if (!trans) {
+ /* List is empty; see if there's more to do */
+ gsi_channel_update(channel);
+ trans = gsi_channel_trans_complete(channel);
+ }
+
+ if (trans)
+ gsi_trans_move_polled(trans);
+
+ return trans;
+}
+
+/**
+ * gsi_channel_poll() - NAPI poll function for a channel
+ * @napi: NAPI structure for the channel
+ * @budget: Budget supplied by NAPI core
+ *
+ * @Return: Number of items polled (<= budget)
+ *
+ * Single transactions completed by hardware are polled until either
+ * the budget is exhausted, or there are no more. Each transaction
+ * polled is passed to gsi_trans_complete(), to perform remaining
+ * completion processing and retire/free the transaction.
+ */
+static int gsi_channel_poll(struct napi_struct *napi, int budget)
+{
+ struct gsi_channel *channel;
+ int count = 0;
+
+ channel = container_of(napi, struct gsi_channel, napi);
+ while (count < budget) {
+ struct gsi_trans *trans;
+
+		trans = gsi_channel_poll_one(channel);
+		if (!trans)
+			break;
+		count++;	/* Count each completion against the budget */
+		gsi_trans_complete(trans);
+ }
+
+ if (count < budget) {
+ napi_complete(&channel->napi);
+ gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
+ }
+
+ return count;
+}
+
+/* The event bitmap represents which event ids are available for allocation.
+ * Set bits are not available, clear bits can be used. This function
+ * initializes the map so all events supported by the hardware are available,
+ * then precludes any reserved events from being allocated.
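+ *
+ * For example, if evt_ring_max is 13, bits 13 through 31 start out set
+ * (making ids 13 and above unavailable), and the MHI event id range is
+ * then reserved as well.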
+ */
+static u32 gsi_event_bitmap_init(u32 evt_ring_max)
+{
+ u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
+
+ event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);
+
+ return event_bitmap;
+}
+
+/* Setup function for event rings */
+static void gsi_evt_ring_setup(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
+/* Inverse of gsi_evt_ring_setup() */
+static void gsi_evt_ring_teardown(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
+/* Setup function for a single channel */
+static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
+ bool db_enable)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 evt_ring_id = channel->evt_ring_id;
+ int ret;
+
+ if (!channel->gsi)
+ return 0; /* Ignore uninitialized channels */
+
+ ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
+ if (ret)
+ return ret;
+
+ gsi_evt_ring_program(gsi, evt_ring_id);
+
+ ret = gsi_channel_alloc_command(gsi, channel_id);
+ if (ret)
+ goto err_evt_ring_de_alloc;
+
+ gsi_channel_program(channel, db_enable);
+
+ if (channel->toward_ipa)
+ netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
+ gsi_channel_poll, NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(&gsi->dummy_dev, &channel->napi,
+ gsi_channel_poll, NAPI_POLL_WEIGHT);
+
+ return 0;
+
+err_evt_ring_de_alloc:
+ /* We've done nothing with the event ring yet so don't reset */
+ gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_setup_one() */
+static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 evt_ring_id = channel->evt_ring_id;
+
+ if (!channel->gsi)
+ return; /* Ignore uninitialized channels */
+
+ netif_napi_del(&channel->napi);
+
+ gsi_channel_deprogram(channel);
+ gsi_channel_de_alloc_command(gsi, channel_id);
+ gsi_evt_ring_reset_command(gsi, evt_ring_id);
+ gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
+}
+
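+/* Issue a GSI generic command, which is performed on behalf of the
+ * modem EE.  Here these are used only to allocate or halt a modem
+ * channel.
+ */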
+static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
+ enum gsi_generic_cmd_opcode opcode)
+{
+ struct completion *completion = &gsi->completion;
+ u32 val;
+
+ val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
+ val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
+ val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
+
+ if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
+ return 0; /* Success! */
+
+ dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
+ opcode, channel_id);
+
+ return -ETIMEDOUT;
+}
+
+static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
+{
+ return gsi_generic_command(gsi, channel_id,
+ GSI_GENERIC_ALLOCATE_CHANNEL);
+}
+
+static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
+{
+ int ret;
+
+ ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
+ if (ret)
+ dev_err(gsi->dev, "error %d halting modem channel %u\n",
+ ret, channel_id);
+}
+
+/* Setup function for channels */
+static int gsi_channel_setup(struct gsi *gsi, bool db_enable)
+{
+ u32 channel_id = 0;
+ u32 mask;
+ int ret;
+
+ gsi_evt_ring_setup(gsi);
+ gsi_irq_enable(gsi);
+
+ mutex_lock(&gsi->mutex);
+
+ do {
+ ret = gsi_channel_setup_one(gsi, channel_id, db_enable);
+ if (ret)
+ goto err_unwind;
+ } while (++channel_id < gsi->channel_count);
+
+ /* Make sure no channels were defined that hardware does not support */
+ while (channel_id < GSI_CHANNEL_COUNT_MAX) {
+ struct gsi_channel *channel = &gsi->channel[channel_id++];
+
+ if (!channel->gsi)
+ continue; /* Ignore uninitialized channels */
+
+ dev_err(gsi->dev, "channel %u not supported by hardware\n",
+ channel_id - 1);
+ channel_id = gsi->channel_count;
+ goto err_unwind;
+ }
+
+ /* Allocate modem channels if necessary */
+ mask = gsi->modem_channel_bitmap;
+ while (mask) {
+ u32 modem_channel_id = __ffs(mask);
+
+ ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
+ if (ret)
+ goto err_unwind_modem;
+
+ /* Clear bit from mask only after success (for unwind) */
+ mask ^= BIT(modem_channel_id);
+ }
+
+ mutex_unlock(&gsi->mutex);
+
+ return 0;
+
+err_unwind_modem:
+ /* Compute which modem channels need to be deallocated */
+ mask ^= gsi->modem_channel_bitmap;
+ while (mask) {
+ u32 channel_id = __fls(mask);
+
+ mask ^= BIT(channel_id);
+
+ gsi_modem_channel_halt(gsi, channel_id);
+ }
+
+err_unwind:
+ while (channel_id--)
+ gsi_channel_teardown_one(gsi, channel_id);
+
+ mutex_unlock(&gsi->mutex);
+
+ gsi_irq_disable(gsi);
+ gsi_evt_ring_teardown(gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_setup() */
+static void gsi_channel_teardown(struct gsi *gsi)
+{
+ u32 mask = gsi->modem_channel_bitmap;
+ u32 channel_id;
+
+ mutex_lock(&gsi->mutex);
+
+ while (mask) {
+ u32 channel_id = __fls(mask);
+
+ mask ^= BIT(channel_id);
+
+ gsi_modem_channel_halt(gsi, channel_id);
+ }
+
+ channel_id = gsi->channel_count - 1;
+ do
+ gsi_channel_teardown_one(gsi, channel_id);
+ while (channel_id--);
+
+ mutex_unlock(&gsi->mutex);
+
+ gsi_irq_disable(gsi);
+ gsi_evt_ring_teardown(gsi);
+}
+
+/* Setup function for GSI. GSI firmware must be loaded and initialized */
+int gsi_setup(struct gsi *gsi, bool db_enable)
+{
+ u32 val;
+
+ /* Here is where we first touch the GSI hardware */
+ val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
+ if (!(val & ENABLED_FMASK)) {
+ dev_err(gsi->dev, "GSI has not been enabled\n");
+ return -EIO;
+ }
+
+ val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
+
+ gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
+ if (!gsi->channel_count) {
+ dev_err(gsi->dev, "GSI reports zero channels supported\n");
+ return -EINVAL;
+ }
+ if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
+ dev_warn(gsi->dev,
+ "limiting to %u channels (hardware supports %u)\n",
+ GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
+ gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
+ }
+
+ gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
+ if (!gsi->evt_ring_count) {
+ dev_err(gsi->dev, "GSI reports zero event rings supported\n");
+ return -EINVAL;
+ }
+ if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
+ dev_warn(gsi->dev,
+ "limiting to %u event rings (hardware supports %u)\n",
+ GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
+ gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
+ }
+
+ /* Initialize the error log */
+ iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
+
+ /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
+ iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
+
+ return gsi_channel_setup(gsi, db_enable);
+}
+
+/* Inverse of gsi_setup() */
+void gsi_teardown(struct gsi *gsi)
+{
+ gsi_channel_teardown(gsi);
+}
+
+/* Initialize a channel's event ring */
+static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
+{
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+ int ret;
+
+ ret = gsi_evt_ring_id_alloc(gsi);
+ if (ret < 0)
+ return ret;
+ channel->evt_ring_id = ret;
+
+ evt_ring = &gsi->evt_ring[channel->evt_ring_id];
+ evt_ring->channel = channel;
+
+ ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
+ if (!ret)
+ return 0; /* Success! */
+
+ dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
+ ret, gsi_channel_id(channel));
+
+ gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_evt_ring_init() */
+static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
+{
+ u32 evt_ring_id = channel->evt_ring_id;
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+
+ evt_ring = &gsi->evt_ring[evt_ring_id];
+ gsi_ring_free(gsi, &evt_ring->ring);
+ gsi_evt_ring_id_free(gsi, evt_ring_id);
+}
+
+/* Init function for event rings */
+static void gsi_evt_ring_init(struct gsi *gsi)
+{
+ u32 evt_ring_id = 0;
+
+ gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
+ gsi->event_enable_bitmap = 0;
+ do
+ init_completion(&gsi->evt_ring[evt_ring_id].completion);
+ while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
+}
+
+/* Inverse of gsi_evt_ring_init() */
+static void gsi_evt_ring_exit(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
+static bool gsi_channel_data_valid(struct gsi *gsi,
+ const struct ipa_gsi_endpoint_data *data)
+{
+#ifdef IPA_VALIDATION
+ u32 channel_id = data->channel_id;
+ struct device *dev = gsi->dev;
+
+ /* Make sure channel ids are in the range driver supports */
+ if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
+ dev_err(dev, "bad channel id %u (must be less than %u)\n",
+ channel_id, GSI_CHANNEL_COUNT_MAX);
+ return false;
+ }
+
+ if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
+ dev_err(dev, "bad EE id %u (AP or modem)\n", data->ee_id);
+ return false;
+ }
+
+ if (!data->channel.tlv_count ||
+ data->channel.tlv_count > GSI_TLV_MAX) {
+ dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n",
+ channel_id, data->channel.tlv_count, GSI_TLV_MAX);
+ return false;
+ }
+
+ /* We have to allow at least one maximally-sized transaction to
+ * be outstanding (which would use tlv_count TREs). Given how
+ * gsi_channel_tre_max() is computed, tre_count has to be almost
+ * twice the TLV FIFO size to satisfy this requirement.
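+ *
+ * For example, a TLV FIFO size of 16 requires a TRE ring with at least
+ * 31 entries (and being a power of 2, in practice at least 32).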
+ */
+ if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
+ dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
+ channel_id, data->channel.tlv_count,
+ data->channel.tre_count);
+ return false;
+ }
+
+ if (!is_power_of_2(data->channel.tre_count)) {
+ dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n",
+ channel_id, data->channel.tre_count);
+ return false;
+ }
+
+ if (!is_power_of_2(data->channel.event_count)) {
+ dev_err(dev, "channel %u bad event_count %u (not power of 2)\n",
+ channel_id, data->channel.event_count);
+ return false;
+ }
+#endif /* IPA_VALIDATION */
+
+ return true;
+}
+
+/* Init function for a single channel */
+static int gsi_channel_init_one(struct gsi *gsi,
+ const struct ipa_gsi_endpoint_data *data,
+ bool command, bool prefetch)
+{
+ struct gsi_channel *channel;
+ u32 tre_count;
+ int ret;
+
+ if (!gsi_channel_data_valid(gsi, data))
+ return -EINVAL;
+
+ /* Worst case we need an event for every outstanding TRE */
+ if (data->channel.tre_count > data->channel.event_count) {
+		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
+			 data->channel_id, data->channel.event_count);
+ tre_count = data->channel.event_count;
+ } else {
+ tre_count = data->channel.tre_count;
+ }
+
+ channel = &gsi->channel[data->channel_id];
+ memset(channel, 0, sizeof(*channel));
+
+ channel->gsi = gsi;
+ channel->toward_ipa = data->toward_ipa;
+ channel->command = command;
+ channel->use_prefetch = command && prefetch;
+ channel->tlv_count = data->channel.tlv_count;
+ channel->tre_count = tre_count;
+ channel->event_count = data->channel.event_count;
+ init_completion(&channel->completion);
+
+ ret = gsi_channel_evt_ring_init(channel);
+ if (ret)
+ goto err_clear_gsi;
+
+ ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
+ if (ret) {
+ dev_err(gsi->dev, "error %d allocating channel %u ring\n",
+ ret, data->channel_id);
+ goto err_channel_evt_ring_exit;
+ }
+
+ ret = gsi_channel_trans_init(gsi, data->channel_id);
+ if (ret)
+ goto err_ring_free;
+
+ if (command) {
+ u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
+
+ ret = ipa_cmd_pool_init(channel, tre_max);
+ }
+ if (!ret)
+ return 0; /* Success! */
+
+ gsi_channel_trans_exit(channel);
+err_ring_free:
+ gsi_ring_free(gsi, &channel->tre_ring);
+err_channel_evt_ring_exit:
+ gsi_channel_evt_ring_exit(channel);
+err_clear_gsi:
+ channel->gsi = NULL; /* Mark it not (fully) initialized */
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_init_one() */
+static void gsi_channel_exit_one(struct gsi_channel *channel)
+{
+ if (!channel->gsi)
+ return; /* Ignore uninitialized channels */
+
+ if (channel->command)
+ ipa_cmd_pool_exit(channel);
+ gsi_channel_trans_exit(channel);
+ gsi_ring_free(channel->gsi, &channel->tre_ring);
+ gsi_channel_evt_ring_exit(channel);
+}
+
+/* Init function for channels */
+static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
+ const struct ipa_gsi_endpoint_data *data,
+ bool modem_alloc)
+{
+ int ret = 0;
+ u32 i;
+
+ gsi_evt_ring_init(gsi);
+
+ /* The endpoint data array is indexed by endpoint name */
+ for (i = 0; i < count; i++) {
+ bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
+
+ if (ipa_gsi_endpoint_data_empty(&data[i]))
+ continue; /* Skip over empty slots */
+
+ /* Mark modem channels to be allocated (hardware workaround) */
+ if (data[i].ee_id == GSI_EE_MODEM) {
+ if (modem_alloc)
+ gsi->modem_channel_bitmap |=
+ BIT(data[i].channel_id);
+ continue;
+ }
+
+ ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
+ if (ret)
+ goto err_unwind;
+ }
+
+ return ret;
+
+err_unwind:
+ while (i--) {
+ if (ipa_gsi_endpoint_data_empty(&data[i]))
+ continue;
+ if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
+ gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
+ continue;
+ }
+		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
+ }
+ gsi_evt_ring_exit(gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_init() */
+static void gsi_channel_exit(struct gsi *gsi)
+{
+ u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
+
+ do
+ gsi_channel_exit_one(&gsi->channel[channel_id]);
+ while (channel_id--);
+ gsi->modem_channel_bitmap = 0;
+
+ gsi_evt_ring_exit(gsi);
+}
+
+/* Init function for GSI. GSI hardware does not need to be "ready" */
+int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
+ u32 count, const struct ipa_gsi_endpoint_data *data,
+ bool modem_alloc)
+{
+ struct resource *res;
+ resource_size_t size;
+ unsigned int irq;
+ int ret;
+
+ gsi_validate_build();
+
+ gsi->dev = &pdev->dev;
+
+ /* The GSI layer performs NAPI on all endpoints. NAPI requires a
+ * network device structure, but the GSI layer does not have one,
+ * so we must create a dummy network device for this purpose.
+ */
+ init_dummy_netdev(&gsi->dummy_dev);
+
+ /* Get the GSI IRQ and request for it to wake the system */
+ ret = platform_get_irq_byname(pdev, "gsi");
+ if (ret <= 0) {
+ dev_err(gsi->dev,
+ "DT error %d getting \"gsi\" IRQ property\n", ret);
+ return ret ? : -EINVAL;
+ }
+ irq = ret;
+
+ ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
+ if (ret) {
+ dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
+ return ret;
+ }
+ gsi->irq = irq;
+
+ ret = enable_irq_wake(gsi->irq);
+ if (ret)
+ dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret);
+ gsi->irq_wake_enabled = !ret;
+
+ /* Get GSI memory range and map it */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
+ if (!res) {
+ dev_err(gsi->dev,
+ "DT error getting \"gsi\" memory property\n");
+ ret = -ENODEV;
+ goto err_disable_irq_wake;
+ }
+
+ size = resource_size(res);
+ if (res->start > U32_MAX || size > U32_MAX - res->start) {
+ dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n");
+ ret = -EINVAL;
+ goto err_disable_irq_wake;
+ }
+
+ gsi->virt = ioremap(res->start, size);
+ if (!gsi->virt) {
+ dev_err(gsi->dev, "unable to remap \"gsi\" memory\n");
+ ret = -ENOMEM;
+ goto err_disable_irq_wake;
+ }
+
+ ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
+ if (ret)
+ goto err_iounmap;
+
+ mutex_init(&gsi->mutex);
+ init_completion(&gsi->completion);
+
+ return 0;
+
+err_iounmap:
+ iounmap(gsi->virt);
+err_disable_irq_wake:
+ if (gsi->irq_wake_enabled)
+ (void)disable_irq_wake(gsi->irq);
+ free_irq(gsi->irq, gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_init() */
+void gsi_exit(struct gsi *gsi)
+{
+ mutex_destroy(&gsi->mutex);
+ gsi_channel_exit(gsi);
+ if (gsi->irq_wake_enabled)
+ (void)disable_irq_wake(gsi->irq);
+ free_irq(gsi->irq, gsi);
+ iounmap(gsi->virt);
+}
+
+/* The maximum number of outstanding TREs on a channel. This limits
+ * a channel's maximum number of transactions outstanding (worst case
+ * is one TRE per transaction).
+ *
+ * The absolute limit is the number of TREs in the channel's TRE ring,
+ * and in theory we should be able to use all of them. But in practice,
+ * doing that led to the hardware reporting exhaustion of event ring
+ * slots for writing completion information. So the hardware limit
+ * would be (tre_count - 1).
+ *
+ * We reduce it a bit further though. Transaction resource pools are
+ * sized to be a little larger than this maximum, to allow resource
+ * allocations to always be contiguous. The number of entries in a
+ * TRE ring buffer is a power of 2, and the extra resources in a pool
+ * tend to nearly double the memory allocated for it. Reducing the
+ * maximum number of outstanding TREs allows the number of entries in
+ * a pool to avoid crossing that power-of-2 boundary, and this can
+ * substantially reduce pool memory requirements. The number we
+ * reduce it by matches the number added in gsi_trans_pool_init().
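+ *
+ * For example, a channel with a 256-entry TRE ring and a 16-entry TLV
+ * FIFO is limited to 256 - (16 - 1), or 241, outstanding TREs.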
+ */
+u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ /* Hardware limit is channel->tre_count - 1 */
+ return channel->tre_count - (channel->tlv_count - 1);
+}
+
+/* Returns the maximum number of TREs in a single transaction for a channel */
+u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ return channel->tlv_count;
+}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
new file mode 100644
index 000000000000..0698ff1ae7a6
--- /dev/null
+++ b/drivers/net/ipa/gsi.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _GSI_H_
+#define _GSI_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+/* Maximum number of channels and event rings supported by the driver */
+#define GSI_CHANNEL_COUNT_MAX 17
+#define GSI_EVT_RING_COUNT_MAX 13
+
+/* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
+#define GSI_TLV_MAX 64
+
+struct device;
+struct scatterlist;
+struct platform_device;
+
+struct gsi;
+struct gsi_trans;
+struct gsi_channel_data;
+struct ipa_gsi_endpoint_data;
+
+/* Execution environment IDs */
+enum gsi_ee_id {
+ GSI_EE_AP = 0,
+ GSI_EE_MODEM = 1,
+ GSI_EE_UC = 2,
+ GSI_EE_TZ = 3,
+};
+
+struct gsi_ring {
+ void *virt; /* ring array base address */
+ dma_addr_t addr; /* primarily low 32 bits used */
+ u32 count; /* number of elements in ring */
+
+ /* The ring index value indicates the next "open" entry in the ring.
+ *
+ * A channel ring consists of TRE entries filled by the AP and passed
+ * to the hardware for processing. For a channel ring, the ring index
+ * identifies the next unused entry to be filled by the AP.
+ *
+ * An event ring consists of event structures filled by the hardware
+ * and passed to the AP. For event rings, the ring index identifies
+ * the next ring entry that is not known to have been filled by the
+ * hardware.
+ */
+ u32 index;
+};
+
+/* Transactions use several resources that can be allocated dynamically
+ * but taken from a fixed-size pool. The number of elements required for
+ * the pool is limited by the total number of TREs that can be outstanding.
+ *
+ * If sufficient TREs are available to reserve for a transaction,
+ * allocation from these pools is guaranteed to succeed. Furthermore,
+ * these resources are implicitly freed whenever the TREs in the
+ * transaction they're associated with are released.
+ *
+ * The result of a pool allocation of multiple elements is always
+ * contiguous.
+ */
+struct gsi_trans_pool {
+ void *base; /* base address of element pool */
+ u32 count; /* # elements in the pool */
+ u32 free; /* next free element in pool (modulo) */
+ u32 size; /* size (bytes) of an element */
+ u32 max_alloc; /* max allocation request */
+ dma_addr_t addr; /* DMA address if DMA pool (or 0) */
+};
+
+struct gsi_trans_info {
+ atomic_t tre_avail; /* TREs available for allocation */
+ struct gsi_trans_pool pool; /* transaction pool */
+ struct gsi_trans_pool sg_pool; /* scatterlist pool */
+ struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
+	struct gsi_trans_pool info_pool; /* command information pool */
+ struct gsi_trans **map; /* TRE -> transaction map */
+
+ spinlock_t spinlock; /* protects updates to the lists */
+ struct list_head alloc; /* allocated, not committed */
+ struct list_head pending; /* committed, awaiting completion */
+ struct list_head complete; /* completed, awaiting poll */
+ struct list_head polled; /* returned by gsi_channel_poll_one() */
+};
+
+/* Hardware values signifying the state of a channel */
+enum gsi_channel_state {
+ GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0,
+ GSI_CHANNEL_STATE_ALLOCATED = 0x1,
+ GSI_CHANNEL_STATE_STARTED = 0x2,
+ GSI_CHANNEL_STATE_STOPPED = 0x3,
+ GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
+ GSI_CHANNEL_STATE_ERROR = 0xf,
+};
+
+/* We only care about channels between IPA and AP */
+struct gsi_channel {
+ struct gsi *gsi;
+ bool toward_ipa;
+ bool command; /* AP command TX channel or not */
+ bool use_prefetch; /* use prefetch (else escape buf) */
+
+ u8 tlv_count; /* # entries in TLV FIFO */
+ u16 tre_count;
+ u16 event_count;
+
+ struct completion completion; /* signals channel state changes */
+ enum gsi_channel_state state;
+
+ struct gsi_ring tre_ring;
+ u32 evt_ring_id;
+
+ u64 byte_count; /* total # bytes transferred */
+ u64 trans_count; /* total # transactions */
+ /* The following counts are used only for TX endpoints */
+ u64 queued_byte_count; /* last reported queued byte count */
+ u64 queued_trans_count; /* ...and queued trans count */
+ u64 compl_byte_count; /* last reported completed byte count */
+ u64 compl_trans_count; /* ...and completed trans count */
+
+ struct gsi_trans_info trans_info;
+
+ struct napi_struct napi;
+};
+
+/* Hardware values signifying the state of an event ring */
+enum gsi_evt_ring_state {
+ GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0,
+ GSI_EVT_RING_STATE_ALLOCATED = 0x1,
+ GSI_EVT_RING_STATE_ERROR = 0xf,
+};
+
+struct gsi_evt_ring {
+ struct gsi_channel *channel;
+ struct completion completion; /* signals event ring state changes */
+ enum gsi_evt_ring_state state;
+ struct gsi_ring ring;
+};
+
+struct gsi {
+ struct device *dev; /* Same as IPA device */
+ struct net_device dummy_dev; /* needed for NAPI */
+ void __iomem *virt;
+ u32 irq;
+ bool irq_wake_enabled;
+ u32 channel_count;
+ u32 evt_ring_count;
+ struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
+ struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
+ u32 event_bitmap;
+ u32 event_enable_bitmap;
+ u32 modem_channel_bitmap;
+ struct completion completion; /* for global EE commands */
+ struct mutex mutex; /* protects commands, programming */
+};
+
+/**
+ * gsi_setup() - Set up the GSI subsystem
+ * @gsi: Address of GSI structure embedded in an IPA structure
+ * @db_enable: Whether to use the GSI doorbell engine
+ *
+ * @Return: 0 if successful, or a negative error code
+ *
+ * Performs initialization that must wait until the GSI hardware is
+ * ready (including firmware loaded).
+ */
+int gsi_setup(struct gsi *gsi, bool db_enable);
+
+/**
+ * gsi_teardown() - Tear down GSI subsystem
+ * @gsi: GSI address previously passed to a successful gsi_setup() call
+ */
+void gsi_teardown(struct gsi *gsi);
+
+/**
+ * gsi_channel_tre_max() - Channel maximum number of in-flight TREs
+ * @gsi: GSI pointer
+ * @channel_id: Channel whose limit is to be returned
+ *
+ * @Return: The maximum number of TREs outstanding on the channel
+ */
+u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_trans_tre_max() - Maximum TREs in a single transaction
+ * @gsi: GSI pointer
+ * @channel_id: Channel whose limit is to be returned
+ *
+ * @Return: The maximum TRE count per transaction on the channel
+ */
+u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_start() - Start an allocated GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to start
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int gsi_channel_start(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_stop() - Stop a started GSI channel
+ * @gsi: GSI pointer returned by gsi_setup()
+ * @channel_id: Channel to stop
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_reset() - Reset an allocated GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to be reset
+ * @db_enable: Whether doorbell engine should be enabled
+ *
+ * Reset a channel and reconfigure it. The @db_enable flag indicates
+ * whether the doorbell engine will be enabled following reconfiguration.
+ *
+ * GSI hardware relinquishes ownership of all pending receive buffer
+ * transactions and they will complete with their cancelled flag set.
+ */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable);
+
+int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
+int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
+
+/**
+ * gsi_init() - Initialize the GSI subsystem
+ * @gsi: Address of GSI structure embedded in an IPA structure
+ * @pdev:	IPA platform device
+ * @prefetch:	Whether the command channel should use TRE prefetching
+ * @count:	Number of entries in the endpoint data array
+ * @data:	Array of endpoint configuration data
+ * @modem_alloc: Whether the AP allocates modem channels (hardware workaround)
+ *
+ * @Return: 0 if successful, or a negative error code
+ *
+ * Early stage initialization of the GSI subsystem, performing tasks
+ * that can be done before the GSI hardware is ready to use.
+ */
+int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
+ u32 count, const struct ipa_gsi_endpoint_data *data,
+ bool modem_alloc);
+
+/**
+ * gsi_exit() - Exit the GSI subsystem
+ * @gsi: GSI address previously passed to a successful gsi_init() call
+ */
+void gsi_exit(struct gsi *gsi);
+
+#endif /* _GSI_H_ */
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
new file mode 100644
index 000000000000..b57d0198ebc1
--- /dev/null
+++ b/drivers/net/ipa/gsi_private.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _GSI_PRIVATE_H_
+#define _GSI_PRIVATE_H_
+
+/* === Only "gsi.c" and "gsi_trans.c" should include this file === */
+
+#include <linux/types.h>
+
+struct gsi_trans;
+struct gsi_ring;
+struct gsi_channel;
+
+#define GSI_RING_ELEMENT_SIZE 16 /* bytes */
+
+/* Return the entry that follows one provided in a transaction pool */
+void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element);
+
+/**
+ * gsi_trans_move_complete() - Mark a GSI transaction completed
+ * @trans: Transaction to commit
+ */
+void gsi_trans_move_complete(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_move_polled() - Mark a transaction polled
+ * @trans: Transaction to update
+ */
+void gsi_trans_move_polled(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_complete() - Complete a GSI transaction
+ * @trans: Transaction to complete
+ *
+ * Marks a transaction complete (including freeing it).
+ */
+void gsi_trans_complete(struct gsi_trans *trans);
+
+/**
+ * gsi_channel_trans_mapped() - Return a transaction mapped to a TRE index
+ * @channel: Channel associated with the transaction
+ * @index: Index of the TRE having a transaction
+ *
+ * @Return: The GSI transaction pointer associated with the TRE index
+ */
+struct gsi_trans *gsi_channel_trans_mapped(struct gsi_channel *channel,
+ u32 index);
+
+/**
+ * gsi_channel_trans_complete() - Return a channel's next completed transaction
+ * @channel: Channel whose next transaction is to be returned
+ *
+ * @Return: The next completed transaction, or NULL if nothing new
+ */
+struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_trans_cancel_pending() - Cancel pending transactions
+ * @channel: Channel whose pending transactions should be cancelled
+ *
+ * Cancel all pending transactions on a channel. These are transactions
+ * that have been committed but not yet completed. This is required when
+ * the channel gets reset. At that time all pending transactions will be
+ * marked as cancelled.
+ *
+ * NOTE: Transactions already complete at the time of this call are
+ * unaffected.
+ */
+void gsi_channel_trans_cancel_pending(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_trans_init() - Initialize a channel's GSI transaction info
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ *
+ * @Return: 0 if successful, or -ENOMEM on allocation failure
+ *
+ * Creates and sets up information for managing transactions on a channel
+ */
+int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_trans_exit() - Inverse of gsi_channel_trans_init()
+ * @channel: Channel whose transaction information is to be cleaned up
+ */
+void gsi_channel_trans_exit(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_doorbell() - Ring a channel's doorbell
+ * @channel: Channel whose doorbell should be rung
+ *
+ * Rings a channel's doorbell to inform the GSI hardware that new
+ * transactions (TREs, really) are available for it to process.
+ */
+void gsi_channel_doorbell(struct gsi_channel *channel);
+
+/**
+ * gsi_ring_virt() - Return virtual address for a ring entry
+ * @ring: Ring whose address is to be translated
+ * @index:	Index (slot number) of entry
+ */
+void *gsi_ring_virt(struct gsi_ring *ring, u32 index);
+
+/**
+ * gsi_channel_tx_queued() - Report the number of bytes queued to hardware
+ * @channel: Channel whose bytes have been queued
+ *
+ * This arranges for the number of transactions and bytes that have
+ * been queued to hardware for transfer to be reported.  It passes
+ * this information up the network stack so it can be used to
+ * throttle transmissions.
+ */
+void gsi_channel_tx_queued(struct gsi_channel *channel);
+
+#endif /* _GSI_PRIVATE_H_ */
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
new file mode 100644
index 000000000000..7613b9cc7cf6
--- /dev/null
+++ b/drivers/net/ipa/gsi_reg.h
@@ -0,0 +1,417 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _GSI_REG_H_
+#define _GSI_REG_H_
+
+/* === Only "gsi.c" should include this file === */
+
+#include <linux/bits.h>
+
+/**
+ * DOC: GSI Registers
+ *
+ * GSI registers are located within the "gsi" address space defined by Device
+ * Tree. The offset of each register within that space is specified by
+ * symbols defined below. The GSI address space is mapped to virtual memory
+ * space in gsi_init(). All GSI registers are 32 bits wide.
+ *
+ * Each register type is duplicated for a number of instances of something.
+ * For example, each GSI channel has its own set of registers defining its
+ * configuration. The offset to a channel's set of registers is computed
+ * based on a "base" offset plus an additional "stride" amount computed
+ * from the channel's ID. For such registers, the offset is computed by a
+ * function-like macro that takes a parameter used in the computation.
+ *
+ * The offset of a register dependent on execution environment is computed
+ * by a macro that is supplied a parameter "ee". The "ee" value is a member
+ * of the gsi_ee_id enumerated type.
+ *
+ * The offset of a channel register is computed by a macro that is supplied a
+ * parameter "ch". The "ch" value is a channel id whose maximum value is 30
+ * (though the actual limit is hardware-dependent).
+ *
+ * The offset of an event register is computed by a macro that is supplied a
+ * parameter "ev". The "ev" value is an event id whose maximum value is 15
+ * (though the actual limit is hardware-dependent).
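+ *
+ * For example, GSI_EE_N_CH_C_CNTXT_0_OFFSET(5, GSI_EE_AP) below evaluates
+ * to 0x0001c000 + 0x4000 * 0 + 0x80 * 5, or 0x0001c280.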
+ */
+
+#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
+ GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
+ (0x0000c018 + 0x1000 * (ee))
+
+#define GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET \
+ GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
+ (0x0000c01c + 0x1000 * (ee))
+
+#define GSI_INTER_EE_SRC_CH_IRQ_CLR_OFFSET \
+ GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0000c028 + 0x1000 * (ee))
+
+#define GSI_INTER_EE_SRC_EV_CH_IRQ_CLR_OFFSET \
+ GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0000c02c + 0x1000 * (ee))
+
+#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
+ (0x0001c000 + 0x4000 * (ee) + 0x80 * (ch))
+#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
+#define CHTYPE_DIR_FMASK GENMASK(3, 3)
+#define EE_FMASK GENMASK(7, 4)
+#define CHID_FMASK GENMASK(12, 8)
+/* The next field is present for GSI v2.0 and above */
+#define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13)
+#define ERINDEX_FMASK GENMASK(18, 14)
+#define CHSTATE_FMASK GENMASK(23, 20)
+#define ELEMENT_SIZE_FMASK GENMASK(31, 24)
+
+#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \
+ (0x0001c004 + 0x4000 * (ee) + 0x80 * (ch))
+#define R_LENGTH_FMASK GENMASK(15, 0)
+
+#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_2_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_2_OFFSET(ch, ee) \
+ (0x0001c008 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_CNTXT_3_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_3_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_3_OFFSET(ch, ee) \
+ (0x0001c00c + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_QOS_OFFSET(ch) \
+ GSI_EE_N_CH_C_QOS_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_QOS_OFFSET(ch, ee) \
+ (0x0001c05c + 0x4000 * (ee) + 0x80 * (ch))
+#define WRR_WEIGHT_FMASK GENMASK(3, 0)
+#define MAX_PREFETCH_FMASK GENMASK(8, 8)
+#define USE_DB_ENG_FMASK GENMASK(9, 9)
+/* The next field is present for GSI v2.0 and above */
+#define USE_ESCAPE_BUF_ONLY_FMASK GENMASK(10, 10)
+
+#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_0_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_0_OFFSET(ch, ee) \
+ (0x0001c060 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_SCRATCH_1_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_1_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_1_OFFSET(ch, ee) \
+ (0x0001c064 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_SCRATCH_2_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_2_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_2_OFFSET(ch, ee) \
+ (0x0001c068 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_SCRATCH_3_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_3_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_3_OFFSET(ch, ee) \
+ (0x0001c06c + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_EV_CH_E_CNTXT_0_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET(ev, ee) \
+ (0x0001d000 + 0x4000 * (ee) + 0x80 * (ev))
+#define EV_CHTYPE_FMASK GENMASK(3, 0)
+#define EV_EE_FMASK GENMASK(7, 4)
+#define EV_EVCHID_FMASK GENMASK(15, 8)
+#define EV_INTYPE_FMASK GENMASK(16, 16)
+#define EV_CHSTATE_FMASK GENMASK(23, 20)
+#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
+
+#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET(ev, ee) \
+ (0x0001d004 + 0x4000 * (ee) + 0x80 * (ev))
+#define EV_R_LENGTH_FMASK GENMASK(15, 0)
+
+#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET(ev, ee) \
+ (0x0001d008 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_3_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET(ev, ee) \
+ (0x0001d00c + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_4_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET(ev, ee) \
+ (0x0001d010 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_8_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET(ev, ee) \
+ (0x0001d020 + 0x4000 * (ee) + 0x80 * (ev))
+#define MODT_FMASK GENMASK(15, 0)
+#define MODC_FMASK GENMASK(23, 16)
+#define MOD_CNT_FMASK GENMASK(31, 24)
+
+#define GSI_EV_CH_E_CNTXT_9_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET(ev, ee) \
+ (0x0001d024 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_10_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET(ev, ee) \
+ (0x0001d028 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_11_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET(ev, ee) \
+ (0x0001d02c + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_12_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET(ev, ee) \
+ (0x0001d030 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_13_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET(ev, ee) \
+ (0x0001d034 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_SCRATCH_0_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET(ev, ee) \
+ (0x0001d048 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_SCRATCH_1_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET(ev, ee) \
+ (0x0001d04c + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_CH_C_DOORBELL_0_OFFSET(ch) \
+ GSI_EE_N_CH_C_DOORBELL_0_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_DOORBELL_0_OFFSET(ch, ee) \
+ (0x0001e000 + 0x4000 * (ee) + 0x08 * (ch))
+
+#define GSI_EV_CH_E_DOORBELL_0_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET(ev, ee) \
+ (0x0001e100 + 0x4000 * (ee) + 0x08 * (ev))
+
+#define GSI_GSI_STATUS_OFFSET \
+ GSI_EE_N_GSI_STATUS_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_GSI_STATUS_OFFSET(ee) \
+ (0x0001f000 + 0x4000 * (ee))
+#define ENABLED_FMASK GENMASK(0, 0)
+
+#define GSI_CH_CMD_OFFSET \
+ GSI_EE_N_CH_CMD_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CH_CMD_OFFSET(ee) \
+ (0x0001f008 + 0x4000 * (ee))
+#define CH_CHID_FMASK GENMASK(7, 0)
+#define CH_OPCODE_FMASK GENMASK(31, 24)
+
+#define GSI_EV_CH_CMD_OFFSET \
+ GSI_EE_N_EV_CH_CMD_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_EV_CH_CMD_OFFSET(ee) \
+ (0x0001f010 + 0x4000 * (ee))
+#define EV_CHID_FMASK GENMASK(7, 0)
+#define EV_OPCODE_FMASK GENMASK(31, 24)
+
+#define GSI_GENERIC_CMD_OFFSET \
+ GSI_EE_N_GENERIC_CMD_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_GENERIC_CMD_OFFSET(ee) \
+ (0x0001f018 + 0x4000 * (ee))
+#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
+#define GENERIC_CHID_FMASK GENMASK(9, 5)
+#define GENERIC_EE_FMASK GENMASK(13, 10)
+
+#define GSI_GSI_HW_PARAM_2_OFFSET \
+ GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \
+ (0x0001f040 + 0x4000 * (ee))
+#define IRAM_SIZE_FMASK GENMASK(2, 0)
+#define IRAM_SIZE_ONE_KB_FVAL 0
+#define IRAM_SIZE_TWO_KB_FVAL 1
+/* The next two values are available for GSI v2.0 and above */
+#define IRAM_SIZE_TWO_N_HALF_KB_FVAL 2
+#define IRAM_SIZE_THREE_KB_FVAL 3
+#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
+#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
+#define GSI_CH_PEND_TRANSLATE_FMASK GENMASK(13, 13)
+#define GSI_CH_FULL_LOGIC_FMASK GENMASK(14, 14)
+/* Fields below are present for GSI v2.0 and above */
+#define GSI_USE_SDMA_FMASK GENMASK(15, 15)
+#define GSI_SDMA_N_INT_FMASK GENMASK(18, 16)
+#define GSI_SDMA_MAX_BURST_FMASK GENMASK(26, 19)
+#define GSI_SDMA_N_IOVEC_FMASK GENMASK(29, 27)
+/* Fields below are present for GSI v2.2 and above */
+#define GSI_USE_RD_WR_ENG_FMASK GENMASK(30, 30)
+#define GSI_USE_INTER_EE_FMASK GENMASK(31, 31)
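+
+/* Usage sketch (illustrative only; "gsi->virt" is the driver's mapped
+ * register space, and u32_get_bits() comes from <linux/bitfield.h>):
+ *
+ *	u32 val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
+ *	u32 channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
+ *	u32 evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
+ */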
+
+#define GSI_CNTXT_TYPE_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
+ (0x0001f080 + 0x4000 * (ee))
+#define CH_CTRL_FMASK GENMASK(0, 0)
+#define EV_CTRL_FMASK GENMASK(1, 1)
+#define GLOB_EE_FMASK GENMASK(2, 2)
+#define IEOB_FMASK GENMASK(3, 3)
+#define INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
+#define INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
+#define GENERAL_FMASK GENMASK(6, 6)
+
+#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
+ (0x0001f088 + 0x4000 * (ee))
+#define MSK_CH_CTRL_FMASK GENMASK(0, 0)
+#define MSK_EV_CTRL_FMASK GENMASK(1, 1)
+#define MSK_GLOB_EE_FMASK GENMASK(2, 2)
+#define MSK_IEOB_FMASK GENMASK(3, 3)
+#define MSK_INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
+#define MSK_INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
+#define MSK_GENERAL_FMASK GENMASK(6, 6)
+#define GSI_CNTXT_TYPE_IRQ_MSK_ALL GENMASK(6, 0)
+
+#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(ee) \
+ (0x0001f090 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(ee) \
+ (0x0001f094 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(ee) \
+ (0x0001f098 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
+ (0x0001f09c + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0001f0a0 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0001f0a4 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_IEOB_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(ee) \
+ (0x0001f0b0 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(ee) \
+ (0x0001f0b8 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(ee) \
+ (0x0001f0c0 + 0x4000 * (ee))
+
+#define GSI_CNTXT_GLOB_IRQ_STTS_OFFSET \
+ GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(ee) \
+ (0x0001f100 + 0x4000 * (ee))
+#define ERROR_INT_FMASK GENMASK(0, 0)
+#define GP_INT1_FMASK GENMASK(1, 1)
+#define GP_INT2_FMASK GENMASK(2, 2)
+#define GP_INT3_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
+ GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(ee) \
+ (0x0001f108 + 0x4000 * (ee))
+#define EN_ERROR_INT_FMASK GENMASK(0, 0)
+#define EN_GP_INT1_FMASK GENMASK(1, 1)
+#define EN_GP_INT2_FMASK GENMASK(2, 2)
+#define EN_GP_INT3_FMASK GENMASK(3, 3)
+#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0)
+
+#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
+ (0x0001f110 + 0x4000 * (ee))
+#define CLR_ERROR_INT_FMASK GENMASK(0, 0)
+#define CLR_GP_INT1_FMASK GENMASK(1, 1)
+#define CLR_GP_INT2_FMASK GENMASK(2, 2)
+#define CLR_GP_INT3_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
+ GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(ee) \
+ (0x0001f118 + 0x4000 * (ee))
+#define BREAK_POINT_FMASK GENMASK(0, 0)
+#define BUS_ERROR_FMASK GENMASK(1, 1)
+#define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
+#define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
+ GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(ee) \
+ (0x0001f120 + 0x4000 * (ee))
+#define EN_BREAK_POINT_FMASK GENMASK(0, 0)
+#define EN_BUS_ERROR_FMASK GENMASK(1, 1)
+#define EN_CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
+#define EN_MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0)
+
+#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
+ (0x0001f128 + 0x4000 * (ee))
+#define CLR_BREAK_POINT_FMASK GENMASK(0, 0)
+#define CLR_BUS_ERROR_FMASK GENMASK(1, 1)
+#define CLR_CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
+#define CLR_MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_INTSET_OFFSET \
+ GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_INTSET_OFFSET(ee) \
+ (0x0001f180 + 0x4000 * (ee))
+#define INTYPE_FMASK GENMASK(0, 0)
+
+#define GSI_ERROR_LOG_OFFSET \
+ GSI_EE_N_ERROR_LOG_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_ERROR_LOG_OFFSET(ee) \
+ (0x0001f200 + 0x4000 * (ee))
+#define ERR_ARG3_FMASK GENMASK(3, 0)
+#define ERR_ARG2_FMASK GENMASK(7, 4)
+#define ERR_ARG1_FMASK GENMASK(11, 8)
+#define ERR_CODE_FMASK GENMASK(15, 12)
+#define ERR_VIRT_IDX_FMASK GENMASK(23, 19)
+#define ERR_TYPE_FMASK GENMASK(27, 24)
+#define ERR_EE_FMASK GENMASK(31, 28)
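+
+/* Decoding sketch (illustrative only): extracting fields from a logged
+ * error, much as a GSI error interrupt handler would:
+ *
+ *	u32 val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
+ *	u32 code = u32_get_bits(val, ERR_CODE_FMASK);
+ *	u32 type = u32_get_bits(val, ERR_TYPE_FMASK);
+ */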
+
+#define GSI_ERROR_LOG_CLR_OFFSET \
+ GSI_EE_N_ERROR_LOG_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_ERROR_LOG_CLR_OFFSET(ee) \
+ (0x0001f210 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SCRATCH_0_OFFSET \
+ GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(ee) \
+ (0x0001f400 + 0x4000 * (ee))
+#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
+#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
+#define GENERIC_EE_SUCCESS_FVAL 1
+#define GENERIC_EE_NO_RESOURCES_FVAL 7
+#define USB_MAX_PACKET_FMASK GENMASK(15, 15) /* 0: HS; 1: SS */
+#define MHI_BASE_CHANNEL_FMASK GENMASK(31, 24)
+
+#endif /* _GSI_REG_H_ */
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
new file mode 100644
index 000000000000..2fd21d75367d
--- /dev/null
+++ b/drivers/net/ipa/gsi_trans.c
@@ -0,0 +1,786 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_private.h"
+#include "gsi_trans.h"
+#include "ipa_gsi.h"
+#include "ipa_data.h"
+#include "ipa_cmd.h"
+
+/**
+ * DOC: GSI Transactions
+ *
+ * A GSI transaction abstracts the behavior of a GSI channel by representing
+ * everything about a related group of IPA commands in a single structure.
+ * (A "command" in this sense is either a data transfer or an IPA immediate
+ * command.) Most details of interaction with the GSI hardware are managed
+ * by the GSI transaction core, allowing users to simply describe commands
+ * to be performed. When a transaction has completed a callback function
+ * (dependent on the type of endpoint associated with the channel) allows
+ * cleanup of resources associated with the transaction.
+ *
+ * To perform a command (or set of them), a user of the GSI transaction
+ * interface allocates a transaction, indicating the number of TREs required
+ * (one per command). If sufficient TREs are available, they are reserved
+ * for use in the transaction and the allocation succeeds. This way
+ * exhaustion of the available TREs in a channel ring is detected
+ * as early as possible. All resources required to complete a transaction
+ * are allocated at transaction allocation time.
+ *
+ * Commands performed as part of a transaction are represented in an array
+ * of Linux scatterlist structures. This array is allocated with the
+ * transaction, and its entries are initialized using standard scatterlist
+ * functions (such as sg_set_buf() or skb_to_sgvec()).
+ *
+ * Once a transaction's scatterlist structures have been initialized, the
+ * transaction is committed. The caller is responsible for mapping buffers
+ * for DMA if necessary, and this should be done *before* allocating
+ * the transaction. Between a successful allocation and commit of a
+ * transaction no errors should occur.
+ *
+ * Committing transfers ownership of the entire transaction to the GSI
+ * transaction core. The GSI transaction code formats the content of
+ * the scatterlist array into the channel ring buffer and informs the
+ * hardware that new TREs are available to process.
+ *
+ * The last TRE in each transaction is marked to interrupt the AP when the
+ * GSI hardware has completed it. Because transfers described by TREs are
+ * performed strictly in order, signaling the completion of just the last
+ * TRE in the transaction is sufficient to indicate the full transaction
+ * is complete.
+ *
+ * When a transaction is complete, ipa_gsi_trans_complete() is called by the
+ * GSI code into the IPA layer, allowing it to perform any final cleanup
+ * required before the transaction is freed.
+ */
+
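+/* Lifecycle sketch (illustrative only; error handling is trimmed, and
+ * the channel_id/page/size/offset values are placeholders):
+ *
+ *	struct gsi_trans *trans;
+ *	int ret;
+ *
+ *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_FROM_DEVICE);
+ *	if (!trans)
+ *		return -EBUSY;			(no TREs available)
+ *	ret = gsi_trans_page_add(trans, page, size, offset);
+ *	if (ret) {
+ *		gsi_trans_free(trans);		(error case only)
+ *		return ret;
+ *	}
+ *	gsi_trans_commit(trans, true);		(ring the doorbell)
+ */
+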
+/* Hardware values representing a transfer element type */
+enum gsi_tre_type {
+ GSI_RE_XFER = 0x2,
+ GSI_RE_IMMD_CMD = 0x3,
+};
+
+/* An entry in a channel ring */
+struct gsi_tre {
+ __le64 addr; /* DMA address */
+ __le16 len_opcode; /* length in bytes or enum IPA_CMD_* */
+ __le16 reserved;
+ __le32 flags; /* TRE_FLAGS_* */
+};
+
+/* gsi_tre->flags mask values (in CPU byte order) */
+#define TRE_FLAGS_CHAIN_FMASK GENMASK(0, 0)
+#define TRE_FLAGS_IEOB_FMASK GENMASK(8, 8)
+#define TRE_FLAGS_IEOT_FMASK GENMASK(9, 9)
+#define TRE_FLAGS_BEI_FMASK GENMASK(10, 10)
+#define TRE_FLAGS_TYPE_FMASK GENMASK(23, 16)
+
+int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
+ u32 max_alloc)
+{
+ void *virt;
+
+#ifdef IPA_VALIDATE
+ if (!size || size % 8)
+ return -EINVAL;
+ if (count < max_alloc)
+ return -EINVAL;
+ if (!max_alloc)
+ return -EINVAL;
+#endif /* IPA_VALIDATE */
+
+ /* By allocating a few extra entries in our pool (one less
+ * than the maximum number that will be requested in a
+ * single allocation), we can always satisfy requests without
+ * ever worrying about straddling the end of the pool array.
+ * If there aren't enough entries starting at the free index,
+ * we just allocate free entries from the beginning of the pool.
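+	 *
+	 * For example (hypothetical numbers): with count 100 and
+	 * max_alloc 8 we allocate 107 entries.  A request for 8
+	 * entries when the free index is 99 uses entries 99-106
+	 * (the last seven being the extras), while a request that
+	 * cannot fit before the end restarts at entry 0 instead.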
+ */
+ virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ pool->base = virt;
+ /* If the allocator gave us any extra memory, use it */
+ pool->count = ksize(pool->base) / size;
+ pool->free = 0;
+ pool->max_alloc = max_alloc;
+ pool->size = size;
+ pool->addr = 0; /* Only used for DMA pools */
+
+ return 0;
+}
+
+void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
+{
+ kfree(pool->base);
+ memset(pool, 0, sizeof(*pool));
+}
+
+/* Home-grown DMA pool. This way we can preallocate and use the tre_count
+ * to guarantee allocations will succeed. Even though we specify max_alloc
+ * (and it can be more than one), we only allow allocation of a single
+ * element from a DMA pool.
+ */
+int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ size_t size, u32 count, u32 max_alloc)
+{
+ size_t total_size;
+ dma_addr_t addr;
+ void *virt;
+
+#ifdef IPA_VALIDATE
+ if (!size || size % 8)
+ return -EINVAL;
+ if (count < max_alloc)
+ return -EINVAL;
+ if (!max_alloc)
+ return -EINVAL;
+#endif /* IPA_VALIDATE */
+
+ /* Don't let allocations cross a power-of-two boundary */
+ size = __roundup_pow_of_two(size);
+ total_size = (count + max_alloc - 1) * size;
+
+	/* The allocator will give us a power-of-2 number of pages
+	 * whether we ask for one or not, so round our request up to
+	 * that size.  That way we won't waste any memory that would
+	 * be available beyond the required space.
+	 */
+	total_size = PAGE_SIZE << get_order(total_size);
+
+ virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ pool->base = virt;
+ pool->count = total_size / size;
+ pool->free = 0;
+ pool->size = size;
+ pool->max_alloc = max_alloc;
+ pool->addr = addr;
+
+ return 0;
+}
+
+void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
+{
+	size_t total_size = pool->count * pool->size;
+
+	dma_free_coherent(dev, total_size, pool->base, pool->addr);
+	memset(pool, 0, sizeof(*pool));
+}
+
+/* Return the byte offset of the next free entry in the pool */
+static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
+{
+ u32 offset;
+
+ /* assert(count > 0); */
+ /* assert(count <= pool->max_alloc); */
+
+ /* Allocate from beginning if wrap would occur */
+ if (count > pool->count - pool->free)
+ pool->free = 0;
+
+ offset = pool->free * pool->size;
+ pool->free += count;
+ memset(pool->base + offset, 0, count * pool->size);
+
+ return offset;
+}
+
+/* Allocate a contiguous block of zeroed entries from a pool */
+void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
+{
+ return pool->base + gsi_trans_pool_alloc_common(pool, count);
+}
+
+/* Allocate a single zeroed entry from a DMA pool */
+void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
+{
+ u32 offset = gsi_trans_pool_alloc_common(pool, 1);
+
+ *addr = pool->addr + offset;
+
+ return pool->base + offset;
+}
+
+/* Return the pool element that immediately follows the one given.
+ * This only works if elements are allocated one at a time.
+ */
+void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
+{
+ void *end = pool->base + pool->count * pool->size;
+
+ /* assert(element >= pool->base); */
+ /* assert(element < end); */
+ /* assert(pool->max_alloc == 1); */
+ element += pool->size;
+
+ return element < end ? element : pool->base;
+}
+
+/* Map a given ring entry index to the transaction associated with it */
+static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
+ struct gsi_trans *trans)
+{
+ /* Note: index *must* be used modulo the ring count here */
+ channel->trans_info.map[index % channel->tre_ring.count] = trans;
+}
+
+/* Return the transaction mapped to a given ring entry */
+struct gsi_trans *
+gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
+{
+ /* Note: index *must* be used modulo the ring count here */
+ return channel->trans_info.map[index % channel->tre_ring.count];
+}
+
+/* Return the oldest completed transaction for a channel (or null) */
+struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
+{
+ return list_first_entry_or_null(&channel->trans_info.complete,
+ struct gsi_trans, links);
+}
+
+/* Move a transaction from the allocated list to the pending list */
+static void gsi_trans_move_pending(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_move_tail(&trans->links, &trans_info->pending);
+
+ spin_unlock_bh(&trans_info->spinlock);
+}
+
+/* Move a transaction and all of its predecessors from the pending list
+ * to the completed list.
+ */
+void gsi_trans_move_complete(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct list_head list;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ /* Move this transaction and all predecessors to completed list */
+ list_cut_position(&list, &trans_info->pending, &trans->links);
+ list_splice_tail(&list, &trans_info->complete);
+
+ spin_unlock_bh(&trans_info->spinlock);
+}
+
+/* Move a transaction from the completed list to the polled list */
+void gsi_trans_move_polled(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_move_tail(&trans->links, &trans_info->polled);
+
+ spin_unlock_bh(&trans_info->spinlock);
+}
+
+/* Reserve some number of TREs on a channel. Returns true if successful */
+static bool
+gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
+{
+ int avail = atomic_read(&trans_info->tre_avail);
+ int new;
+
+ do {
+ new = avail - (int)tre_count;
+ if (unlikely(new < 0))
+ return false;
+ } while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));
+
+ return true;
+}
+
+/* Release previously-reserved TRE entries to a channel */
+static void
+gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
+{
+ atomic_add(tre_count, &trans_info->tre_avail);
+}
+
+/* Allocate a GSI transaction on a channel */
+struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
+ u32 tre_count,
+ enum dma_data_direction direction)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi_trans_info *trans_info;
+ struct gsi_trans *trans;
+
+ /* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */
+
+ trans_info = &channel->trans_info;
+
+ /* We reserve the TREs now, but consume them at commit time.
+ * If there aren't enough available, we're done.
+ */
+ if (!gsi_trans_tre_reserve(trans_info, tre_count))
+ return NULL;
+
+	/* Allocate and initialize non-zero fields in the transaction */
+ trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
+ trans->gsi = gsi;
+ trans->channel_id = channel_id;
+ trans->tre_count = tre_count;
+ init_completion(&trans->completion);
+
+ /* Allocate the scatterlist and (if requested) info entries. */
+ trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
+ sg_init_marker(trans->sgl, tre_count);
+
+ trans->direction = direction;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_add_tail(&trans->links, &trans_info->alloc);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ refcount_set(&trans->refcount, 1);
+
+ return trans;
+}
+
+/* Free a previously-allocated transaction (used only in case of error) */
+void gsi_trans_free(struct gsi_trans *trans)
+{
+ struct gsi_trans_info *trans_info;
+
+ if (!refcount_dec_and_test(&trans->refcount))
+ return;
+
+ trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_del(&trans->links);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ ipa_gsi_trans_release(trans);
+
+ /* Releasing the reserved TREs implicitly frees the sgl[] and
+ * (if present) info[] arrays, plus the transaction itself.
+ */
+ gsi_trans_tre_release(trans_info, trans->tre_count);
+}
+
+/* Add an immediate command to a transaction */
+void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
+ dma_addr_t addr, enum dma_data_direction direction,
+ enum ipa_cmd_opcode opcode)
+{
+ struct ipa_cmd_info *info;
+ u32 which = trans->used++;
+ struct scatterlist *sg;
+
+ /* assert(which < trans->tre_count); */
+
+ /* Set the page information for the buffer. We also need to fill in
+ * the DMA address for the buffer (something dma_map_sg() normally
+ * does).
+ */
+ sg = &trans->sgl[which];
+
+ sg_set_buf(sg, buf, size);
+ sg_dma_address(sg) = addr;
+
+ info = &trans->info[which];
+ info->opcode = opcode;
+ info->direction = direction;
+}
+
+/* Add a page transfer to a transaction. It will fill the only TRE. */
+int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
+ u32 offset)
+{
+ struct scatterlist *sg = &trans->sgl[0];
+ int ret;
+
+ /* assert(trans->tre_count == 1); */
+ /* assert(!trans->used); */
+
+ sg_set_page(sg, page, size, offset);
+ ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
+ if (!ret)
+ return -ENOMEM;
+
+ trans->used++; /* Transaction now owns the (DMA mapped) page */
+
+ return 0;
+}
+
+/* Add an SKB transfer to a transaction. No other TREs will be used. */
+int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
+{
+ struct scatterlist *sg = &trans->sgl[0];
+ u32 used;
+ int ret;
+
+ /* assert(trans->tre_count == 1); */
+ /* assert(!trans->used); */
+
+ /* skb->len will not be 0 (checked early) */
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (ret < 0)
+ return ret;
+ used = ret;
+
+ ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
+ if (!ret)
+ return -ENOMEM;
+
+ trans->used += used; /* Transaction now owns the (DMA mapped) skb */
+
+ return 0;
+}
+
+/* Compute the length/opcode value to use for a TRE */
+static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
+{
+ return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
+ : cpu_to_le16((u16)opcode);
+}
+
+/* Compute the flags value to use for a given TRE */
+static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
+{
+ enum gsi_tre_type tre_type;
+ u32 tre_flags;
+
+ tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
+ tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);
+
+ /* Last TRE contains interrupt flags */
+ if (last_tre) {
+ /* All transactions end in a transfer completion interrupt */
+ tre_flags |= TRE_FLAGS_IEOT_FMASK;
+ /* Don't interrupt when outbound commands are acknowledged */
+ if (bei)
+ tre_flags |= TRE_FLAGS_BEI_FMASK;
+ } else { /* All others indicate there's more to come */
+ tre_flags |= TRE_FLAGS_CHAIN_FMASK;
+ }
+
+ return cpu_to_le32(tre_flags);
+}
+
+static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
+ u32 len, bool last_tre, bool bei,
+ enum ipa_cmd_opcode opcode)
+{
+ struct gsi_tre tre;
+
+ tre.addr = cpu_to_le64(addr);
+ tre.len_opcode = gsi_tre_len_opcode(opcode, len);
+ tre.reserved = 0;
+ tre.flags = gsi_tre_flags(last_tre, bei, opcode);
+
+ /* ARM64 can write 16 bytes as a unit with a single instruction.
+ * Doing the assignment this way is an attempt to make that happen.
+ */
+ *dest_tre = tre;
+}
+
+/**
+ * __gsi_trans_commit() - Common GSI transaction commit code
+ * @trans: Transaction to commit
+ * @ring_db: Whether to tell the hardware about these queued transfers
+ *
+ * Formats channel ring TRE entries based on the content of the scatterlist.
+ * Maps a transaction pointer to the last ring entry used for the transaction,
+ * so it can be recovered when it completes. Moves the transaction to the
+ * pending list. Finally, updates the channel ring pointer and optionally
+ * rings the doorbell.
+ */
+static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_ring *ring = &channel->tre_ring;
+ enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
+ bool bei = channel->toward_ipa;
+ struct ipa_cmd_info *info;
+ struct gsi_tre *dest_tre;
+ struct scatterlist *sg;
+ u32 byte_count = 0;
+ u32 avail;
+ u32 i;
+
+ /* assert(trans->used > 0); */
+
+ /* Consume the entries. If we cross the end of the ring while
+ * filling them we'll switch to the beginning to finish.
+ * If there is no info array we're doing a simple data
+ * transfer request, whose opcode is IPA_CMD_NONE.
+ */
+ info = trans->info ? &trans->info[0] : NULL;
+ avail = ring->count - ring->index % ring->count;
+ dest_tre = gsi_ring_virt(ring, ring->index);
+ for_each_sg(trans->sgl, sg, trans->used, i) {
+ bool last_tre = i == trans->used - 1;
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 len = sg_dma_len(sg);
+
+ byte_count += len;
+ if (!avail--)
+ dest_tre = gsi_ring_virt(ring, 0);
+ if (info)
+ opcode = info++->opcode;
+
+ gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
+ dest_tre++;
+ }
+ ring->index += trans->used;
+
+ if (channel->toward_ipa) {
+ /* We record TX bytes when they are sent */
+ trans->len = byte_count;
+ trans->trans_count = channel->trans_count;
+ trans->byte_count = channel->byte_count;
+ channel->trans_count++;
+ channel->byte_count += byte_count;
+ }
+
+ /* Associate the last TRE with the transaction */
+ gsi_channel_trans_map(channel, ring->index - 1, trans);
+
+ gsi_trans_move_pending(trans);
+
+ /* Ring doorbell if requested, or if all TREs are allocated */
+ if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
+ /* Report what we're handing off to hardware for TX channels */
+ if (channel->toward_ipa)
+ gsi_channel_tx_queued(channel);
+ gsi_channel_doorbell(channel);
+ }
+}
+
+/* Commit a GSI transaction */
+void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
+{
+ if (trans->used)
+ __gsi_trans_commit(trans, ring_db);
+ else
+ gsi_trans_free(trans);
+}
+
+/* Commit a GSI transaction and wait for it to complete */
+void gsi_trans_commit_wait(struct gsi_trans *trans)
+{
+ if (!trans->used)
+ goto out_trans_free;
+
+ refcount_inc(&trans->refcount);
+
+ __gsi_trans_commit(trans, true);
+
+ wait_for_completion(&trans->completion);
+
+out_trans_free:
+ gsi_trans_free(trans);
+}
+
+/* Commit a GSI transaction and wait for it to complete, with timeout */
+int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
+ unsigned long timeout)
+{
+ unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
+ unsigned long remaining = 1; /* In case of empty transaction */
+
+ if (!trans->used)
+ goto out_trans_free;
+
+ refcount_inc(&trans->refcount);
+
+ __gsi_trans_commit(trans, true);
+
+ remaining = wait_for_completion_timeout(&trans->completion,
+ timeout_jiffies);
+out_trans_free:
+ gsi_trans_free(trans);
+
+ return remaining ? 0 : -ETIMEDOUT;
+}
+
+/* Process the completion of a transaction; called while polling */
+void gsi_trans_complete(struct gsi_trans *trans)
+{
+ /* If the entire SGL was mapped when added, unmap it now */
+ if (trans->direction != DMA_NONE)
+ dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
+ trans->direction);
+
+ ipa_gsi_trans_complete(trans);
+
+ complete(&trans->completion);
+
+ gsi_trans_free(trans);
+}
+
+/* Cancel a channel's pending transactions */
+void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct gsi_trans *trans;
+ bool cancelled;
+
+ /* channel->gsi->mutex is held by caller */
+ spin_lock_bh(&trans_info->spinlock);
+
+ cancelled = !list_empty(&trans_info->pending);
+ list_for_each_entry(trans, &trans_info->pending, links)
+ trans->cancelled = true;
+
+ list_splice_tail_init(&trans_info->pending, &trans_info->complete);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ /* Schedule NAPI polling to complete the cancelled transactions */
+ if (cancelled)
+ napi_schedule(&channel->napi);
+}
+
+/* Issue a command to read a single byte from a channel */
+int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi_ring *ring = &channel->tre_ring;
+ struct gsi_trans_info *trans_info;
+ struct gsi_tre *dest_tre;
+
+ trans_info = &channel->trans_info;
+
+ /* First reserve the TRE, if possible */
+ if (!gsi_trans_tre_reserve(trans_info, 1))
+ return -EBUSY;
+
+	/* Now fill the reserved TRE and tell the hardware */
+
+ dest_tre = gsi_ring_virt(ring, ring->index);
+ gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);
+
+ ring->index++;
+ gsi_channel_doorbell(channel);
+
+ return 0;
+}
+
+/* Mark a gsi_trans_read_byte() request done */
+void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ gsi_trans_tre_release(&channel->trans_info, 1);
+}
+
+/* Initialize a channel's GSI transaction info */
+int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi_trans_info *trans_info;
+ u32 tre_max;
+ int ret;
+
+ /* Ensure the size of a channel element is what's expected */
+ BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);
+
+ /* The map array is used to determine what transaction is associated
+ * with a TRE that the hardware reports has completed. We need one
+ * map entry per TRE.
+ */
+ trans_info = &channel->trans_info;
+ trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
+ GFP_KERNEL);
+ if (!trans_info->map)
+ return -ENOMEM;
+
+ /* We can't use more TREs than there are available in the ring.
+	 * This limits the number of transactions that can be outstanding.
+ * Worst case is one TRE per transaction (but we actually limit
+ * it to something a little less than that). We allocate resources
+ * for transactions (including transaction structures) based on
+ * this maximum number.
+ */
+ tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
+
+ /* Transactions are allocated one at a time. */
+ ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
+ tre_max, 1);
+ if (ret)
+ goto err_kfree;
+
+ /* A transaction uses a scatterlist array to represent the data
+ * transfers implemented by the transaction. Each scatterlist
+ * element is used to fill a single TRE when the transaction is
+ * committed. So we need as many scatterlist elements as the
+ * maximum number of TREs that can be outstanding.
+ *
+	 * All TREs in a transaction must fit within the channel's TLV FIFO,
+	 * so that is the most TREs a single transaction can allocate.
+ */
+ ret = gsi_trans_pool_init(&trans_info->sg_pool,
+ sizeof(struct scatterlist),
+ tre_max, channel->tlv_count);
+ if (ret)
+ goto err_trans_pool_exit;
+
+ /* Finally, the tre_avail field is what ultimately limits the number
+ * of outstanding transactions and their resources. A transaction
+ * allocation succeeds only if the TREs available are sufficient for
+ * what the transaction might need. Transaction resource pools are
+ * sized based on the maximum number of outstanding TREs, so there
+ * will always be resources available if there are TREs available.
+ */
+ atomic_set(&trans_info->tre_avail, tre_max);
+
+ spin_lock_init(&trans_info->spinlock);
+ INIT_LIST_HEAD(&trans_info->alloc);
+ INIT_LIST_HEAD(&trans_info->pending);
+ INIT_LIST_HEAD(&trans_info->complete);
+ INIT_LIST_HEAD(&trans_info->polled);
+
+ return 0;
+
+err_trans_pool_exit:
+ gsi_trans_pool_exit(&trans_info->pool);
+err_kfree:
+ kfree(trans_info->map);
+
+ dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
+ ret, channel_id);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_trans_init() */
+void gsi_channel_trans_exit(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ gsi_trans_pool_exit(&trans_info->sg_pool);
+ gsi_trans_pool_exit(&trans_info->pool);
+ kfree(trans_info->map);
+}
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
new file mode 100644
index 000000000000..1477fc15b30a
--- /dev/null
+++ b/drivers/net/ipa/gsi_trans.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _GSI_TRANS_H_
+#define _GSI_TRANS_H_
+
+#include <linux/types.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+
+#include "ipa_cmd.h"
+
+struct scatterlist;
+struct device;
+struct sk_buff;
+
+struct gsi;
+struct gsi_trans;
+struct gsi_trans_pool;
+
+/**
+ * struct gsi_trans - a GSI transaction
+ *
+ * Most fields in this structure are for internal use by the transaction
+ * core code:
+ * @links: Links for channel transaction lists by state
+ * @gsi: GSI pointer
+ * @channel_id: Channel number transaction is associated with
+ * @cancelled: If set by the core code, transaction was cancelled
+ * @tre_count: Number of TREs reserved for this transaction
+ * @used: Number of TREs *used* (could be less than tre_count)
+ * @len: Total # of transfer bytes represented in sgl[] (set by core)
+ * @data: Preserved but not touched by the core transaction code
+ * @sgl: An array of scatter/gather entries managed by core code
+ * @info: Array of command information structures (command channel)
+ * @direction: DMA transfer direction (DMA_NONE for commands)
+ * @refcount: Reference count used for destruction
+ * @completion: Completed when the transaction completes
+ * @byte_count: TX channel byte count recorded when transaction committed
+ * @trans_count: Channel transaction count when committed (for BQL accounting)
+ *
+ * The sizes used for some fields in this structure were chosen to ensure
+ * the full structure size is no larger than 128 bytes.
+ */
+struct gsi_trans {
+ struct list_head links; /* gsi_channel lists */
+
+ struct gsi *gsi;
+ u8 channel_id;
+
+ bool cancelled; /* true if transaction was cancelled */
+
+ u8 tre_count; /* # TREs requested */
+ u8 used; /* # entries used in sgl[] */
+ u32 len; /* total # bytes across sgl[] */
+
+ void *data;
+ struct scatterlist *sgl;
+ struct ipa_cmd_info *info; /* array of entries, or null */
+ enum dma_data_direction direction;
+
+ refcount_t refcount;
+ struct completion completion;
+
+ u64 byte_count; /* channel byte_count when committed */
+ u64 trans_count; /* channel trans_count when committed */
+};
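+
+/* Over its lifetime a transaction moves through the channel's state
+ * lists roughly as follows (sketch; the moves are performed by the
+ * core code in gsi_trans.c):
+ *
+ *	alloc -> pending -> complete -> polled -> (freed)
+ */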
+
+/**
+ * gsi_trans_pool_init() - Initialize a pool of structures for transactions
+ * @pool:	Pool pointer
+ * @size:	Size of elements in the pool
+ * @count:	Minimum number of elements in the pool
+ * @max_alloc:	Maximum number of elements allocated at a time from pool
+ *
+ * Return:	0 if successful, or a negative error code
+ */
+int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
+ u32 max_alloc);
+
+/**
+ * gsi_trans_pool_alloc() - Allocate one or more elements from a pool
+ * @pool: Pool pointer
+ * @count: Number of elements to allocate from the pool
+ *
+ * Return:	Virtual address of element(s) allocated from the pool
+ */
+void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count);
+
+/**
+ * gsi_trans_pool_exit() - Inverse of gsi_trans_pool_init()
+ * @pool: Pool pointer
+ */
+void gsi_trans_pool_exit(struct gsi_trans_pool *pool);
+
+/**
+ * gsi_trans_pool_init_dma() - Initialize a pool of DMA-able structures
+ * @dev: Device used for DMA
+ * @pool: Pool pointer
+ * @size: Size of elements in the pool
+ * @count: Minimum number of elements in the pool
+ * @max_alloc: Maximum number of elements allocated at a time from pool
+ *
+ * Return:	0 if successful, or a negative error code
+ *
+ * Structures in this pool reside in DMA-coherent memory.
+ */
+int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ size_t size, u32 count, u32 max_alloc);
+
+/**
+ * gsi_trans_pool_alloc_dma() - Allocate an element from a DMA pool
+ * @pool: DMA pool pointer
+ * @addr: DMA address "handle" associated with the allocation
+ *
+ * Return:	Virtual address of element allocated from the pool
+ *
+ * Only one element at a time may be allocated from a DMA pool.
+ */
+void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
+
+/**
+ * gsi_trans_pool_exit_dma() - Inverse of gsi_trans_pool_init_dma()
+ * @dev:	Device used for DMA
+ * @pool:	Pool pointer
+ */
+void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
+
+/**
+ * gsi_channel_trans_alloc() - Allocate a GSI transaction on a channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel the transaction is associated with
+ * @tre_count: Number of elements in the transaction
+ * @direction: DMA direction for entire SGL (or DMA_NONE)
+ *
+ * Return:	A GSI transaction structure, or a null pointer if all
+ * available transactions are in use
+ */
+struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
+ u32 tre_count,
+ enum dma_data_direction direction);
+
+/**
+ * gsi_trans_free() - Free a previously-allocated GSI transaction
+ * @trans: Transaction to be freed
+ */
+void gsi_trans_free(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_cmd_add() - Add an immediate command to a transaction
+ * @trans: Transaction
+ * @buf: Buffer pointer for command payload
+ * @size: Number of bytes in buffer
+ * @addr: DMA address for payload
+ * @direction: Direction of DMA transfer (or DMA_NONE if none required)
+ * @opcode: IPA immediate command opcode
+ */
+void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
+ dma_addr_t addr, enum dma_data_direction direction,
+ enum ipa_cmd_opcode opcode);
+
+/**
+ * gsi_trans_page_add() - Add a page transfer to a transaction
+ * @trans: Transaction
+ * @page: Page pointer
+ * @size: Number of bytes (starting at offset) to transfer
+ * @offset:	Offset within page for start of transfer
+ *
+ * Return:	0 if successful, or a negative error code
+ */
+int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
+ u32 offset);
+
+/**
+ * gsi_trans_skb_add() - Add a socket transfer to a transaction
+ * @trans: Transaction
+ * @skb: Socket buffer for transfer (outbound)
+ *
+ * Return:	0, or -EMSGSIZE if socket data won't fit in transaction.
+ */
+int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb);
+
+/**
+ * gsi_trans_commit() - Commit a GSI transaction
+ * @trans: Transaction to commit
+ * @ring_db: Whether to tell the hardware about these queued transfers
+ */
+void gsi_trans_commit(struct gsi_trans *trans, bool ring_db);
+
+/**
+ * gsi_trans_commit_wait() - Commit a GSI transaction and wait for it
+ * to complete
+ * @trans: Transaction to commit
+ */
+void gsi_trans_commit_wait(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_commit_wait_timeout() - Commit a GSI transaction and wait for
+ * it to complete, with timeout
+ * @trans: Transaction to commit
+ * @timeout:	Timeout period (in milliseconds)
+ *
+ * Return:	0 if successful, or -ETIMEDOUT if the transaction did not
+ *		complete within the timeout period
+ */
+int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
+ unsigned long timeout);
+
+/**
+ * gsi_trans_read_byte() - Issue a single byte read TRE on a channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel on which to read a byte
+ * @addr: DMA address into which to transfer the one byte
+ *
+ * This is not a transaction operation at all. It's defined here because
+ * it needs to be done in coordination with other transaction activity.
+ */
+int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr);
+
+/**
+ * gsi_trans_read_byte_done() - Clean up after a single byte read TRE
+ * @gsi: GSI pointer
+ * @channel_id: Channel on which byte was read
+ *
+ * This function needs to be called to signal that the work related
+ * to reading a byte initiated by gsi_trans_read_byte() is complete.
+ */
+void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id);
+
+#endif /* _GSI_TRANS_H_ */
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
new file mode 100644
index 000000000000..23fb29889e5a
--- /dev/null
+++ b/drivers/net/ipa/ipa.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/pm_wakeup.h>
+
+#include "ipa_version.h"
+#include "gsi.h"
+#include "ipa_mem.h"
+#include "ipa_qmi.h"
+#include "ipa_endpoint.h"
+#include "ipa_interrupt.h"
+
+struct clk;
+struct icc_path;
+struct net_device;
+struct platform_device;
+
+struct ipa_clock;
+struct ipa_smp2p;
+struct ipa_interrupt;
+
+/**
+ * struct ipa - IPA information
+ * @gsi: Embedded GSI structure
+ * @version: IPA hardware version
+ * @pdev: Platform device
+ * @modem_rproc: Remoteproc handle for modem subsystem
+ * @smp2p: SMP2P information
+ * @clock: IPA clocking information
+ * @suspend_ref:	Whether a clock reference preventing suspend has been taken
+ * @table_addr: DMA address of filter/route table content
+ * @table_virt: Virtual address of filter/route table content
+ * @interrupt: IPA Interrupt information
+ * @uc_loaded: true after microcontroller has reported it's ready
+ * @reg_addr: DMA address used for IPA register access
+ * @reg_virt: Virtual address used for IPA register access
+ * @mem_addr: DMA address of IPA-local memory space
+ * @mem_virt: Virtual address of IPA-local memory space
+ * @mem_offset: Offset from @mem_virt used for access to IPA memory
+ * @mem_size: Total size (bytes) of memory at @mem_virt
+ * @mem: Array of IPA-local memory region descriptors
+ * @zero_addr: DMA address of preallocated zero-filled memory
+ * @zero_virt: Virtual address of preallocated zero-filled memory
+ * @zero_size: Size (bytes) of preallocated zero-filled memory
+ * @wakeup_source: Wakeup source information
+ * @available: Bit mask indicating endpoints hardware supports
+ * @filter_map: Bit mask indicating endpoints that support filtering
+ * @initialized: Bit mask indicating endpoints initialized
+ * @set_up: Bit mask indicating endpoints set up
+ * @enabled: Bit mask indicating endpoints enabled
+ * @endpoint: Array of endpoint information
+ * @channel_map: Mapping of GSI channel to IPA endpoint
+ * @name_map: Mapping of IPA endpoint name to IPA endpoint
+ * @setup_complete: Flag indicating whether setup stage has completed
+ * @modem_state: State of modem (stopped, running)
+ * @modem_netdev: Network device structure used for modem
+ * @qmi: QMI information
+ */
+struct ipa {
+ struct gsi gsi;
+ enum ipa_version version;
+ struct platform_device *pdev;
+ struct rproc *modem_rproc;
+ struct ipa_smp2p *smp2p;
+ struct ipa_clock *clock;
+ atomic_t suspend_ref;
+
+ dma_addr_t table_addr;
+ __le64 *table_virt;
+
+ struct ipa_interrupt *interrupt;
+ bool uc_loaded;
+
+ dma_addr_t reg_addr;
+ void __iomem *reg_virt;
+
+ dma_addr_t mem_addr;
+ void *mem_virt;
+ u32 mem_offset;
+ u32 mem_size;
+ const struct ipa_mem *mem;
+
+ dma_addr_t zero_addr;
+ void *zero_virt;
+ size_t zero_size;
+
+ struct wakeup_source *wakeup_source;
+
+ /* Bit masks indicating endpoint state */
+ u32 available; /* supported by hardware */
+ u32 filter_map;
+ u32 initialized;
+ u32 set_up;
+ u32 enabled;
+
+ struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX];
+ struct ipa_endpoint *channel_map[GSI_CHANNEL_COUNT_MAX];
+ struct ipa_endpoint *name_map[IPA_ENDPOINT_COUNT];
+
+ bool setup_complete;
+
+ atomic_t modem_state; /* enum ipa_modem_state */
+ struct net_device *modem_netdev;
+ struct ipa_qmi qmi;
+};
+
+/**
+ * ipa_setup() - Perform IPA setup
+ * @ipa: IPA pointer
+ *
+ * IPA initialization is broken into stages: init; config; and setup.
+ * (These have inverses exit, deconfig, and teardown.)
+ *
+ * Activities performed at the init stage can be done without requiring
+ * any access to IPA hardware. Activities performed at the config stage
+ * require the IPA clock to be running, because they involve access
+ * to IPA registers. The setup stage is performed only after the GSI
+ * hardware is ready (more on this below). The setup stage allows
+ * the AP to perform more complex initialization by issuing "immediate
+ * commands" using a special interface to the IPA.
+ *
+ * This function, @ipa_setup(), starts the setup stage.
+ *
+ * In order for the GSI hardware to be functional it needs firmware to be
+ * loaded (in addition to some other low-level initialization). This early
+ * GSI initialization can be done either by Trust Zone on the AP or by the
+ * modem.
+ *
+ * If it's done by Trust Zone, the AP loads the GSI firmware and supplies
+ * it to Trust Zone to verify and install. When this completes, if
+ * verification was successful, the GSI layer is ready and ipa_setup()
+ * implements the setup phase of initialization.
+ *
+ * If the modem performs early GSI initialization, the AP needs to know
+ * when this has occurred. An SMP2P interrupt is used for this purpose,
+ * and receipt of that interrupt triggers the call to ipa_setup().
+ */
+int ipa_setup(struct ipa *ipa);
+
+#endif /* _IPA_H_ */
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
new file mode 100644
index 000000000000..374491ea11cf
--- /dev/null
+++ b/drivers/net/ipa/ipa_clock.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_modem.h"
+
+/**
+ * DOC: IPA Clocking
+ *
+ * The "IPA Clock" manages both the IPA core clock and the interconnects
+ * (buses) the IPA depends on as a single logical entity. A reference count
+ * is incremented by "get" operations and decremented by "put" operations.
+ * Transitions of that count from 0 to 1 result in the clock and interconnects
+ * being enabled, and transitions of the count from 1 to 0 cause them to be
+ * disabled. We currently operate the core clock at a fixed clock rate, and
+ * all buses at a fixed average and peak bandwidth. As more advanced IPA
+ * features are enabled, we can make better use of clock and bus scaling.
+ *
+ * An IPA clock reference must be held for any access to IPA hardware.
+ */
+
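+/* Usage sketch (illustrative only):
+ *
+ *	ipa_clock_get(ipa);	(a 0->1 count enables clock and buses)
+ *	<access IPA hardware>
+ *	ipa_clock_put(ipa);	(a 1->0 count disables them again)
+ */
+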
+#define IPA_CORE_CLOCK_RATE (75UL * 1000 * 1000) /* Hz */
+
+/* Interconnect path bandwidths (each times 1000 bytes per second) */
+#define IPA_MEMORY_AVG (80 * 1000) /* 80 MBps */
+#define IPA_MEMORY_PEAK (600 * 1000)
+
+#define IPA_IMEM_AVG (80 * 1000)
+#define IPA_IMEM_PEAK (350 * 1000)
+
+#define IPA_CONFIG_AVG (40 * 1000)
+#define IPA_CONFIG_PEAK (40 * 1000)
+
+/**
+ * struct ipa_clock - IPA clocking information
+ * @count: Clocking reference count
+ * @mutex:	Protects clock enable/disable
+ * @core: IPA core clock
+ * @memory_path: Memory interconnect
+ * @imem_path: Internal memory interconnect
+ * @config_path: Configuration space interconnect
+ */
+struct ipa_clock {
+ atomic_t count;
+ struct mutex mutex; /* protects clock enable/disable */
+ struct clk *core;
+ struct icc_path *memory_path;
+ struct icc_path *imem_path;
+ struct icc_path *config_path;
+};
+
+static struct icc_path *
+ipa_interconnect_init_one(struct device *dev, const char *name)
+{
+ struct icc_path *path;
+
+ path = of_icc_get(dev, name);
+ if (IS_ERR(path))
+		dev_err(dev, "error %ld getting %s interconnect\n",
+			PTR_ERR(path), name);
+
+ return path;
+}
+
+/* Initialize interconnects required for IPA operation */
+static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev)
+{
+ struct icc_path *path;
+
+ path = ipa_interconnect_init_one(dev, "memory");
+ if (IS_ERR(path))
+ goto err_return;
+ clock->memory_path = path;
+
+ path = ipa_interconnect_init_one(dev, "imem");
+ if (IS_ERR(path))
+ goto err_memory_path_put;
+ clock->imem_path = path;
+
+ path = ipa_interconnect_init_one(dev, "config");
+ if (IS_ERR(path))
+ goto err_imem_path_put;
+ clock->config_path = path;
+
+ return 0;
+
+err_imem_path_put:
+ icc_put(clock->imem_path);
+err_memory_path_put:
+ icc_put(clock->memory_path);
+err_return:
+ return PTR_ERR(path);
+}
+
+/* Inverse of ipa_interconnect_init() */
+static void ipa_interconnect_exit(struct ipa_clock *clock)
+{
+ icc_put(clock->config_path);
+ icc_put(clock->imem_path);
+ icc_put(clock->memory_path);
+}
+
+/* Currently we only use one bandwidth level, so just "enable" interconnects */
+static int ipa_interconnect_enable(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+ int ret;
+
+ ret = icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
+ if (ret)
+ return ret;
+
+ ret = icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
+ if (ret)
+ goto err_memory_path_disable;
+
+ ret = icc_set_bw(clock->config_path, IPA_CONFIG_AVG, IPA_CONFIG_PEAK);
+ if (ret)
+ goto err_imem_path_disable;
+
+ return 0;
+
+err_imem_path_disable:
+ (void)icc_set_bw(clock->imem_path, 0, 0);
+err_memory_path_disable:
+ (void)icc_set_bw(clock->memory_path, 0, 0);
+
+ return ret;
+}
+
+/* To disable an interconnect, we just set its bandwidth to 0 */
+static int ipa_interconnect_disable(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+ int ret;
+
+ ret = icc_set_bw(clock->memory_path, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = icc_set_bw(clock->imem_path, 0, 0);
+ if (ret)
+ goto err_memory_path_reenable;
+
+ ret = icc_set_bw(clock->config_path, 0, 0);
+ if (ret)
+ goto err_imem_path_reenable;
+
+ return 0;
+
+err_imem_path_reenable:
+ (void)icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
+err_memory_path_reenable:
+ (void)icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
+
+ return ret;
+}
+
+/* Turn on IPA clocks, including interconnects */
+static int ipa_clock_enable(struct ipa *ipa)
+{
+ int ret;
+
+ ret = ipa_interconnect_enable(ipa);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(ipa->clock->core);
+ if (ret)
+ ipa_interconnect_disable(ipa);
+
+ return ret;
+}
+
+/* Inverse of ipa_clock_enable() */
+static void ipa_clock_disable(struct ipa *ipa)
+{
+ clk_disable_unprepare(ipa->clock->core);
+ (void)ipa_interconnect_disable(ipa);
+}
+
+/* Get an IPA clock reference, but only if the reference count is
+ * already non-zero. Returns true if the additional reference was
+ * added successfully, or false otherwise.
+ */
+bool ipa_clock_get_additional(struct ipa *ipa)
+{
+ return !!atomic_inc_not_zero(&ipa->clock->count);
+}
+
+/* Get an IPA clock reference. If the reference count is non-zero, it is
+ * incremented and return is immediate. Otherwise it is checked again
+ * under protection of the mutex, and if appropriate the clock (and
+ * interconnects) are enabled and suspended endpoints (if any) are
+ * resumed before returning.
+ *
+ * Incrementing the reference count is intentionally deferred until
+ * after the clock is running and endpoints are resumed.
+ */
+void ipa_clock_get(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+ int ret;
+
+ /* If the clock is running, just bump the reference count */
+ if (ipa_clock_get_additional(ipa))
+ return;
+
+ /* Otherwise get the mutex and check again */
+ mutex_lock(&clock->mutex);
+
+ /* A reference might have been added before we got the mutex. */
+ if (ipa_clock_get_additional(ipa))
+ goto out_mutex_unlock;
+
+ ret = ipa_clock_enable(ipa);
+ if (ret) {
+ dev_err(&ipa->pdev->dev, "error %d enabling IPA clock\n", ret);
+ goto out_mutex_unlock;
+ }
+
+ ipa_endpoint_resume(ipa);
+
+ atomic_inc(&clock->count);
+
+out_mutex_unlock:
+ mutex_unlock(&clock->mutex);
+}
+
+/* Attempt to remove an IPA clock reference. If this represents the last
+ * reference, suspend endpoints and disable the clock (and interconnects)
+ * under protection of a mutex.
+ */
+void ipa_clock_put(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+
+ /* If this is not the last reference there's nothing more to do */
+ if (!atomic_dec_and_mutex_lock(&clock->count, &clock->mutex))
+ return;
+
+ ipa_endpoint_suspend(ipa);
+
+ ipa_clock_disable(ipa);
+
+ mutex_unlock(&clock->mutex);
+}
+
+/* Initialize IPA clocking */
+struct ipa_clock *ipa_clock_init(struct device *dev)
+{
+ struct ipa_clock *clock;
+ struct clk *clk;
+ int ret;
+
+ clk = clk_get(dev, "core");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "error %ld getting core clock\n", PTR_ERR(clk));
+ return ERR_CAST(clk);
+ }
+
+ ret = clk_set_rate(clk, IPA_CORE_CLOCK_RATE);
+ if (ret) {
+ dev_err(dev, "error %d setting core clock rate to %lu\n",
+ ret, IPA_CORE_CLOCK_RATE);
+ goto err_clk_put;
+ }
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ ret = -ENOMEM;
+ goto err_clk_put;
+ }
+ clock->core = clk;
+
+ ret = ipa_interconnect_init(clock, dev);
+ if (ret)
+ goto err_kfree;
+
+ mutex_init(&clock->mutex);
+ atomic_set(&clock->count, 0);
+
+ return clock;
+
+err_kfree:
+ kfree(clock);
+err_clk_put:
+ clk_put(clk);
+
+ return ERR_PTR(ret);
+}
+
+/* Inverse of ipa_clock_init() */
+void ipa_clock_exit(struct ipa_clock *clock)
+{
+ struct clk *clk = clock->core;
+
+ WARN_ON(atomic_read(&clock->count) != 0);
+ mutex_destroy(&clock->mutex);
+ ipa_interconnect_exit(clock);
+ kfree(clock);
+ clk_put(clk);
+}
diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h
new file mode 100644
index 000000000000..bc52b35e6bb2
--- /dev/null
+++ b/drivers/net/ipa/ipa_clock.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_CLOCK_H_
+#define _IPA_CLOCK_H_
+
+struct device;
+
+struct ipa;
+
+/**
+ * ipa_clock_init() - Initialize IPA clocking
+ * @dev: IPA device
+ *
+ * Return:	A pointer to an ipa_clock structure, or a pointer-coded error
+ */
+struct ipa_clock *ipa_clock_init(struct device *dev);
+
+/**
+ * ipa_clock_exit() - Inverse of ipa_clock_init()
+ * @clock: IPA clock pointer
+ */
+void ipa_clock_exit(struct ipa_clock *clock);
+
+/**
+ * ipa_clock_get() - Get an IPA clock reference
+ * @ipa: IPA pointer
+ *
+ * This call blocks if this is the first reference.
+ */
+void ipa_clock_get(struct ipa *ipa);
+
+/**
+ * ipa_clock_get_additional() - Get an IPA clock reference if not first
+ * @ipa: IPA pointer
+ *
+ * This returns immediately, and takes a reference only if it would not
+ * be the first one.
+ */
+bool ipa_clock_get_additional(struct ipa *ipa);
+
+/**
+ * ipa_clock_put() - Drop an IPA clock reference
+ * @ipa: IPA pointer
+ *
+ * This drops a clock reference. If the last reference is being dropped,
+ * the clock is stopped and RX endpoints are suspended. This call will
+ * not block unless the last reference is dropped.
+ */
+void ipa_clock_put(struct ipa *ipa);
+
+#endif /* _IPA_CLOCK_H_ */
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
new file mode 100644
index 000000000000..d226b858742d
--- /dev/null
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+
+/**
+ * DOC: IPA Immediate Commands
+ *
+ * The AP command TX endpoint is used to issue immediate commands to the IPA.
+ * An immediate command is generally used to request the IPA do something
+ * other than data transfer to another endpoint.
+ *
+ * Immediate commands are represented by GSI transactions just like other
+ * transfer requests, represented by a single GSI TRE. Each immediate
+ * command has a well-defined format, having a payload of a known length.
+ * This allows the transfer element's length field to be used to hold an
+ * immediate command's opcode. The payload for a command resides in DRAM
+ * and is described by a single scatterlist entry in its transaction.
+ * Commands do not require a transaction completion callback. To commit
+ * an immediate command transaction, either gsi_trans_commit_wait() or
+ * gsi_trans_commit_wait_timeout() is used.
+ */
+
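+/* Usage sketch (illustrative only; channel_id, payload, size, addr and
+ * opcode are placeholders, and the command channel's transaction is
+ * allocated just like any other):
+ *
+ *	trans = gsi_channel_trans_alloc(&ipa->gsi, channel_id, 1, DMA_NONE);
+ *	gsi_trans_cmd_add(trans, payload, size, addr, DMA_NONE, opcode);
+ *	gsi_trans_commit_wait(trans);
+ */
+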
+/* Some commands can wait until indicated pipeline stages are clear */
+enum pipeline_clear_options {
+ pipeline_clear_hps = 0,
+ pipeline_clear_src_grp = 1,
+ pipeline_clear_full = 2,
+};
+
+/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */
+
+struct ipa_cmd_hw_ip_fltrt_init {
+ __le64 hash_rules_addr;
+ __le64 flags;
+ __le64 nhash_rules_addr;
+};
+
+/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
+#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK GENMASK_ULL(11, 0)
+#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK GENMASK_ULL(27, 12)
+#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK GENMASK_ULL(39, 28)
+#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK GENMASK_ULL(55, 40)
+
+/* IPA_CMD_HDR_INIT_LOCAL */
+
+struct ipa_cmd_hw_hdr_init_local {
+ __le64 hdr_table_addr;
+ __le32 flags;
+ __le32 reserved;
+};
+
+/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
+#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK GENMASK(11, 0)
+#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK GENMASK(27, 12)
+
+/* IPA_CMD_REGISTER_WRITE */
+
+/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
+
+#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8)
+#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9)
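+
+/* Sketch: for IPA v4.0+ a full pipeline clear is requested by OR'ing the
+ * encoded option into the opcode, as ipa_cmd_register_write_add() does:
+ *
+ *	opcode |= u16_encode_bits(pipeline_clear_full,
+ *				  REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
+ */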
+
+struct ipa_cmd_register_write {
+ __le16 flags; /* Unused/reserved for IPA v3.5.1 */
+ __le16 offset;
+ __le32 value;
+ __le32 value_mask;
+ __le32 clear_options; /* Unused/reserved for IPA v4.0+ */
+};
+
+/* Field masks for ipa_cmd_register_write structure fields */
+/* The next field is present for IPA v4.0 and above */
+#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK GENMASK(14, 11)
+/* The next field is present for IPA v3.5.1 only */
+#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK GENMASK(15, 15)
+
+/* The next field and its values are present for IPA v3.5.1 only */
+#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK GENMASK(1, 0)
+
+/* IPA_CMD_IP_PACKET_INIT */
+
+struct ipa_cmd_ip_packet_init {
+ u8 dest_endpoint;
+ u8 reserved[7];
+};
+
+/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
+#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
+
+/* IPA_CMD_DMA_TASK_32B_ADDR */
+
+/* This opcode gets modified with a DMA operation count */
+
+#define DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK GENMASK(15, 8)
+
+struct ipa_cmd_hw_dma_task_32b_addr {
+ __le16 flags;
+ __le16 size;
+ __le32 addr;
+ __le16 packet_size;
+ u8 reserved[6];
+};
+
+/* Field masks for ipa_cmd_hw_dma_task_32b_addr flags field */
+#define DMA_TASK_32B_ADDR_FLAGS_SW_RSVD_FMASK GENMASK(10, 0)
+#define DMA_TASK_32B_ADDR_FLAGS_CMPLT_FMASK GENMASK(11, 11)
+#define DMA_TASK_32B_ADDR_FLAGS_EOF_FMASK GENMASK(12, 12)
+#define DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK GENMASK(13, 13)
+#define DMA_TASK_32B_ADDR_FLAGS_LOCK_FMASK GENMASK(14, 14)
+#define DMA_TASK_32B_ADDR_FLAGS_UNLOCK_FMASK GENMASK(15, 15)
+
+/* IPA_CMD_DMA_SHARED_MEM */
+
+/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
+
+#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8)
+#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9)
+
+struct ipa_cmd_hw_dma_mem_mem {
+ __le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
+ __le16 size;
+ __le16 local_addr;
+ __le16 flags;
+ __le64 system_addr;
+};
+
+/* Flag allowing atomic clear of target region after reading data (v4.0+) */
+#define DMA_SHARED_MEM_CLEAR_AFTER_READ GENMASK(15, 15)
+
+/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
+#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK GENMASK(0, 0)
+/* The next two fields are present for IPA v3.5.1 only. */
+#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK GENMASK(1, 1)
+#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK GENMASK(3, 2)
+
+/* IPA_CMD_IP_PACKET_TAG_STATUS */
+
+struct ipa_cmd_ip_packet_tag_status {
+ __le64 tag;
+};
+
+#define IP_PACKET_TAG_STATUS_TAG_FMASK GENMASK_ULL(63, 16)
+
+/* Immediate command payload */
+union ipa_cmd_payload {
+ struct ipa_cmd_hw_ip_fltrt_init table_init;
+ struct ipa_cmd_hw_hdr_init_local hdr_init_local;
+ struct ipa_cmd_register_write register_write;
+ struct ipa_cmd_ip_packet_init ip_packet_init;
+ struct ipa_cmd_hw_dma_task_32b_addr dma_task_32b_addr;
+ struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
+ struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
+};
+
+static void ipa_cmd_validate_build(void)
+{
+ /* The sizes of filter and route tables need to fit into fields
+ * in the ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
+ * might not be used, non-hashed and hashed tables have the same
+ * maximum size. IPv4 and IPv6 filter tables have the same number
+ * of entries, as do IPv4 and IPv6 route tables.
+ */
+#define TABLE_SIZE (TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE)
+#define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
+ BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
+ BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
+#undef TABLE_COUNT_MAX
+#undef TABLE_SIZE
+}
+
+#ifdef IPA_VALIDATE
+
+/* Validate a memory region holding a table */
+bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route, bool ipv6, bool hashed)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 offset_max;
+
+ offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
+ : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ if (mem->offset > offset_max ||
+ ipa->mem_offset > offset_max - mem->offset) {
+ dev_err(dev, "IPv%c %s%s table region offset too large "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ ipv6 ? '6' : '4', hashed ? "hashed " : "",
+ route ? "route" : "filter",
+ ipa->mem_offset, mem->offset, offset_max);
+ return false;
+ }
+
+ if (mem->offset > ipa->mem_size ||
+ mem->size > ipa->mem_size - mem->offset) {
+ dev_err(dev, "IPv%c %s%s table region out of range "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ ipv6 ? '6' : '4', hashed ? "hashed " : "",
+ route ? "route" : "filter",
+ mem->offset, mem->size, ipa->mem_size);
+ return false;
+ }
+
+ return true;
+}
+
+/* Validate the memory region that holds headers */
+static bool ipa_cmd_header_valid(struct ipa *ipa)
+{
+ const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
+ struct device *dev = &ipa->pdev->dev;
+ u32 offset_max;
+ u32 size_max;
+ u32 size;
+
+ offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
+ if (mem->offset > offset_max ||
+ ipa->mem_offset > offset_max - mem->offset) {
+ dev_err(dev, "header table region offset too large "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ ipa->mem_offset, mem->offset, offset_max);
+ return false;
+ }
+
+ size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+ size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
+ size += ipa->mem[IPA_MEM_AP_HEADER].size;
+ if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) {
+ dev_err(dev, "header table region out of range "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ mem->offset, size, ipa->mem_size);
+ return false;
+ }
+
+ return true;
+}
+
+/* Indicate whether an offset can be used with a register_write command */
+static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
+ const char *name, u32 offset)
+{
+ struct ipa_cmd_register_write *payload;
+ struct device *dev = &ipa->pdev->dev;
+ u32 offset_max;
+ u32 bit_count;
+
+ /* The maximum offset in a register_write immediate command depends
+ * on the version of IPA. IPA v3.5.1 supports a 16 bit offset, but
+ * newer versions allow some additional high-order bits.
+ */
+ bit_count = BITS_PER_BYTE * sizeof(payload->offset);
+ if (ipa->version != IPA_VERSION_3_5_1)
+ bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
+ BUILD_BUG_ON(bit_count > 32);
+ offset_max = ~0U >> (32 - bit_count);
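+ /* Example: for IPA v4.0+, bit_count is 16 + 4 = 20, making the
+ * maximum offset 0xfffff.
+ */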
+
+ if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
+ dev_err(dev, "%s offset too large 0x%04x + 0x%04x > 0x%04x)\n",
+ ipa->mem_offset + offset, offset_max);
+ return false;
+ }
+
+ return true;
+}
+
+/* Check whether offsets passed to register_write are valid */
+static bool ipa_cmd_register_write_valid(struct ipa *ipa)
+{
+ const char *name;
+ u32 offset;
+
+ offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ name = "filter/route hash flush";
+ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ return false;
+
+ offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT);
+ name = "maximal endpoint status";
+ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ return false;
+
+ return true;
+}
+
+bool ipa_cmd_data_valid(struct ipa *ipa)
+{
+ if (!ipa_cmd_header_valid(ipa))
+ return false;
+
+ if (!ipa_cmd_register_write_valid(ipa))
+ return false;
+
+ return true;
+}
+
+#endif /* IPA_VALIDATE */
+
+int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct device *dev = channel->gsi->dev;
+ int ret;
+
+ /* This is as good a place as any to validate build constants */
+ ipa_cmd_validate_build();
+
+ /* Even though command payloads are allocated one at a time,
+ * a single transaction can require up to tlv_count of them,
+ * so we treat them as if that many can be allocated at once.
+ */
+ ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
+ sizeof(union ipa_cmd_payload),
+ tre_max, channel->tlv_count);
+ if (ret)
+ return ret;
+
+ /* Each TRE needs a command info structure */
+ ret = gsi_trans_pool_init(&trans_info->info_pool,
+ sizeof(struct ipa_cmd_info),
+ tre_max, channel->tlv_count);
+ if (ret)
+ gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
+
+ return ret;
+}
+
+void ipa_cmd_pool_exit(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct device *dev = channel->gsi->dev;
+
+ gsi_trans_pool_exit(&trans_info->info_pool);
+ gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
+}
+
+static union ipa_cmd_payload *
+ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
+{
+ struct gsi_trans_info *trans_info;
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;
+
+ return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
+}
+
+/* If hash_size is 0, hash_offset and hash_addr are ignored. */
+void ipa_cmd_table_init_add(struct gsi_trans *trans,
+ enum ipa_cmd_opcode opcode, u16 size, u32 offset,
+ dma_addr_t addr, u16 hash_size, u32 hash_offset,
+ dma_addr_t hash_addr)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_hw_ip_fltrt_init *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+ u64 val;
+
+ /* Record the non-hash table offset and size */
+ offset += ipa->mem_offset;
+ val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
+
+ /* The hash table offset and address are zero if its size is 0 */
+ if (hash_size) {
+ /* Record the hash table offset and size */
+ hash_offset += ipa->mem_offset;
+ val |= u64_encode_bits(hash_offset,
+ IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
+ val |= u64_encode_bits(hash_size,
+ IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
+ }
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->table_init;
+
+ /* Fill in all offsets and sizes and the non-hash table address */
+ if (hash_size)
+ payload->hash_rules_addr = cpu_to_le64(hash_addr);
+ payload->flags = cpu_to_le64(val);
+ payload->nhash_rules_addr = cpu_to_le64(addr);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Initialize header space in IPA-local memory */
+void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_hw_hdr_init_local *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+ u32 flags;
+
+ offset += ipa->mem_offset;
+
+ /* With this command we tell the IPA where in its local memory the
+ * header tables reside. The content of the buffer provided is
+ * also written via DMA into that space. The IPA hardware owns
+ * the table, but the AP must initialize it.
+ */
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->hdr_init_local;
+
+ payload->hdr_table_addr = cpu_to_le64(addr);
+ flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+ flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
+ payload->flags = cpu_to_le32(flags);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
+ u32 mask, bool clear_full)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ struct ipa_cmd_register_write *payload;
+ union ipa_cmd_payload *cmd_payload;
+ u32 opcode = IPA_CMD_REGISTER_WRITE;
+ dma_addr_t payload_addr;
+ u32 clear_option;
+ u32 options;
+ u16 flags;
+
+ /* pipeline_clear_src_grp is not used */
+ clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;
+
+ if (ipa->version != IPA_VERSION_3_5_1) {
+ u16 offset_high;
+ u32 val;
+
+ /* Opcode encodes pipeline clear options */
+ /* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
+ val = u16_encode_bits(clear_option,
+ REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
+ opcode |= val;
+
+ /* Extract the high 4 bits from the offset */
+ offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
+ offset &= (1 << 16) - 1;
+
+ /* Encode the extracted high bits into the flags field */
+ flags = u16_encode_bits(offset_high,
+ REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
+ options = 0; /* reserved */
+
+ } else {
+ flags = 0; /* SKIP_CLEAR flag is always 0 */
+ options = u16_encode_bits(clear_option,
+ REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
+ }
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->register_write;
+
+ payload->flags = cpu_to_le16(flags);
+ payload->offset = cpu_to_le16((u16)offset);
+ payload->value = cpu_to_le32(value);
+ payload->value_mask = cpu_to_le32(mask);
+ payload->clear_options = cpu_to_le32(options);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ DMA_NONE, opcode);
+}
+
+/* Skip IP packet processing on the next data transfer on a TX channel */
+static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_ip_packet_init *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+
+ /* assert(endpoint_id <
+ field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->ip_packet_init;
+
+ payload->dest_endpoint = u8_encode_bits(endpoint_id,
+ IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Use a 32-bit DMA command to zero a block of memory */
+void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
+ dma_addr_t addr, bool toward_ipa)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_DMA_TASK_32B_ADDR;
+ struct ipa_cmd_hw_dma_task_32b_addr *payload;
+ union ipa_cmd_payload *cmd_payload;
+ enum dma_data_direction direction;
+ dma_addr_t payload_addr;
+ u16 flags;
+
+ /* assert(addr <= U32_MAX); */
+ addr &= GENMASK_ULL(31, 0);
+
+ /* The opcode encodes the number of DMA operations in the high byte */
+ opcode |= u16_encode_bits(1, DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK);
+
+ direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ /* complete: 0 = don't interrupt; eof: 0 = don't assert eot */
+ flags = DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK;
+ /* lock: 0 = don't lock endpoint; unlock: 0 = don't unlock */
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->dma_task_32b_addr;
+
+ payload->flags = cpu_to_le16(flags);
+ payload->size = cpu_to_le16(size);
+ payload->addr = cpu_to_le32((u32)addr);
+ payload->packet_size = cpu_to_le16(size);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Use a DMA command to read or write a block of IPA-resident memory */
+void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr, bool toward_ipa)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
+ struct ipa_cmd_hw_dma_mem_mem *payload;
+ union ipa_cmd_payload *cmd_payload;
+ enum dma_data_direction direction;
+ dma_addr_t payload_addr;
+ u16 flags;
+
+ /* size and offset must fit in 16 bit fields */
+ /* assert(size > 0 && size <= U16_MAX); */
+ /* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */
+
+ offset += ipa->mem_offset;
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->dma_shared_mem;
+
+ /* payload->clear_after_read was reserved prior to IPA v4.0. It's
+ * never needed for current code, so it's 0 regardless of version.
+ */
+ payload->size = cpu_to_le16(size);
+ payload->local_addr = cpu_to_le16(offset);
+ /* payload->flags:
+ * direction: 0 = write to IPA, 1 read from IPA
+ * Starting at v4.0 these are reserved; either way, all zero:
+ * pipeline clear: 0 = wait for pipeline clear (don't skip)
+ * clear_options: 0 = pipeline_clear_hps
+ * Instead, for v4.0+ these are encoded in the opcode. But again
+ * since both values are 0 we won't bother OR'ing them in.
+ */
+ flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
+ payload->flags = cpu_to_le16(flags);
+ payload->system_addr = cpu_to_le64(addr);
+
+ direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_ip_packet_tag_status *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+
+ /* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->ip_packet_tag_status;
+
+ payload->tag = u64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Issue a small command TX data transfer */
+static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
+ union ipa_cmd_payload *payload;
+ dma_addr_t payload_addr;
+
+ /* assert(size <= sizeof(*payload)); */
+
+ /* Just transfer a zero-filled payload structure */
+ payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+void ipa_cmd_tag_process_add(struct gsi_trans *trans)
+{
+ ipa_cmd_register_write_add(trans, 0, 0, 0, true);
+#if 1
+ /* Reference these functions to avoid a compile error */
+ (void)ipa_cmd_ip_packet_init_add;
+ (void)ipa_cmd_ip_tag_status_add;
+ (void)ipa_cmd_transfer_add;
+#else
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ struct gsi_endpoint *endpoint;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
+
+ ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
+
+ ipa_cmd_transfer_add(trans, 4);
+#endif
+}
+
+/* Returns the number of commands required for the tag process */
+u32 ipa_cmd_tag_process_count(void)
+{
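+ /* One each: register write, packet init, tag status, and transfer */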
+ return 4;
+}
+
+static struct ipa_cmd_info *
+ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
+{
+ struct gsi_channel *channel;
+
+ channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];
+
+ return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
+}
+
+/* Allocate a transaction for the command TX endpoint */
+struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
+{
+ struct ipa_endpoint *endpoint;
+ struct gsi_trans *trans;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+
+ trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
+ tre_count, DMA_NONE);
+ if (trans)
+ trans->info = ipa_cmd_info_alloc(endpoint, tre_count);
+
+ return trans;
+}
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
new file mode 100644
index 000000000000..4917525b3a47
--- /dev/null
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_CMD_H_
+#define _IPA_CMD_H_
+
+#include <linux/types.h>
+#include <linux/dma-direction.h>
+
+struct sk_buff;
+struct scatterlist;
+
+struct ipa;
+struct ipa_mem;
+struct gsi_trans;
+struct gsi_channel;
+
+/**
+ * enum ipa_cmd_opcode: IPA immediate commands
+ *
+ * All immediate commands are issued using the AP command TX endpoint.
+ * The numeric values here are the opcodes for IPA v3.5.1 hardware.
+ *
+ * IPA_CMD_NONE is a special (invalid) value that's used to indicate
+ * a request is *not* an immediate command.
+ */
+enum ipa_cmd_opcode {
+ IPA_CMD_NONE = 0,
+ IPA_CMD_IP_V4_FILTER_INIT = 3,
+ IPA_CMD_IP_V6_FILTER_INIT = 4,
+ IPA_CMD_IP_V4_ROUTING_INIT = 7,
+ IPA_CMD_IP_V6_ROUTING_INIT = 8,
+ IPA_CMD_HDR_INIT_LOCAL = 9,
+ IPA_CMD_REGISTER_WRITE = 12,
+ IPA_CMD_IP_PACKET_INIT = 16,
+ IPA_CMD_DMA_TASK_32B_ADDR = 17,
+ IPA_CMD_DMA_SHARED_MEM = 19,
+ IPA_CMD_IP_PACKET_TAG_STATUS = 20,
+};
+
+/**
+ * struct ipa_cmd_info - information needed for an IPA immediate command
+ *
+ * @opcode: The command opcode.
+ * @direction: Direction of data transfer for DMA commands
+ */
+struct ipa_cmd_info {
+ enum ipa_cmd_opcode opcode;
+ enum dma_data_direction direction;
+};
+
+
+#ifdef IPA_VALIDATE
+
+/**
+ * ipa_cmd_table_valid() - Validate a memory region holding a table
+ * @ipa: - IPA pointer
+ * @mem: - IPA memory region descriptor
+ * @route: - Whether the region holds a route or filter table
+ * @ipv6: - Whether the table is for IPv6 or IPv4
+ * @hashed: - Whether the table is hashed or non-hashed
+ *
+ * @Return: true if region is valid, false otherwise
+ */
+bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route, bool ipv6, bool hashed);
+
+/**
+ * ipa_cmd_data_valid() - Validate command-related configuration
+ * @ipa: - IPA pointer
+ *
+ * @Return: true if assumptions required for command are valid
+ */
+bool ipa_cmd_data_valid(struct ipa *ipa);
+
+#else /* !IPA_VALIDATE */
+
+static inline bool ipa_cmd_table_valid(struct ipa *ipa,
+ const struct ipa_mem *mem, bool route,
+ bool ipv6, bool hashed)
+{
+ return true;
+}
+
+static inline bool ipa_cmd_data_valid(struct ipa *ipa)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
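+
+/* Note: with IPA_VALIDATE undefined the stubs above return true
+ * unconditionally, so the compiler can optimize validation calls away.
+ */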
+
+/**
+ * ipa_cmd_pool_init() - initialize command channel pools
+ * @channel: AP->IPA command TX GSI channel pointer
+ * @tre_count: Number of pool elements to allocate
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_count);
+
+/**
+ * ipa_cmd_pool_exit() - Inverse of ipa_cmd_pool_init()
+ * @channel: AP->IPA command TX GSI channel pointer
+ */
+void ipa_cmd_pool_exit(struct gsi_channel *channel);
+
+/**
+ * ipa_cmd_table_init_add() - Add table init command to a transaction
+ * @trans: GSI transaction
+ * @opcode: IPA immediate command opcode
+ * @size: Size of non-hashed routing table memory
+ * @offset: Offset in IPA shared memory of non-hashed routing table memory
+ * @addr: DMA address of non-hashed table data to write
+ * @hash_size: Size of hashed routing table memory
+ * @hash_offset: Offset in IPA shared memory of hashed routing table memory
+ * @hash_addr: DMA address of hashed table data to write
+ *
+ * If hash_size is 0, hash_offset and hash_addr are ignored.
+ */
+void ipa_cmd_table_init_add(struct gsi_trans *trans, enum ipa_cmd_opcode opcode,
+ u16 size, u32 offset, dma_addr_t addr,
+ u16 hash_size, u32 hash_offset,
+ dma_addr_t hash_addr);
+
+/**
+ * ipa_cmd_hdr_init_local_add() - Add a header init command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of header memory in IPA local space
+ * @size: Size of header memory
+ * @addr: DMA address of buffer to be written from
+ *
+ * Defines and fills the location in IPA memory to use for headers.
+ */
+void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr);
+
+/**
+ * ipa_cmd_register_write_add() - Add a register write command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of register to be written
+ * @value: Value to be written
+ * @mask: Mask of bits in register to update with bits from value
+ * @clear_full: Pipeline clear option; true means full pipeline clear
+ */
+void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
+ u32 mask, bool clear_full);
+
+/**
+ * ipa_cmd_dma_task_32b_addr_add() - Add a 32-bit DMA command to a transaction
+ * @trans: GSI transaction
+ * @size: Number of bytes of memory to be transferred
+ * @addr: DMA address of buffer to be read into or written from
+ * @toward_ipa: true means write to IPA memory; false means read
+ */
+void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
+ dma_addr_t addr, bool toward_ipa);
+
+/**
+ * ipa_cmd_dma_shared_mem_add() - Add a DMA memory command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of IPA memory to be read or written
+ * @size: Number of bytes of memory to be transferred
+ * @addr: DMA address of buffer to be read into or written from
+ * @toward_ipa: true means write to IPA memory; false means read
+ */
+void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset,
+ u16 size, dma_addr_t addr, bool toward_ipa);
+
+/**
+ * ipa_cmd_tag_process_add() - Add IPA tag process commands to a transaction
+ * @trans: GSI transaction
+ */
+void ipa_cmd_tag_process_add(struct gsi_trans *trans);
+
+/**
+ * ipa_cmd_tag_process_count() - Number of commands in a tag process
+ *
+ * @Return: The number of elements to allocate in a transaction
+ * to hold tag process commands
+ */
+u32 ipa_cmd_tag_process_count(void);
+
+/**
+ * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
+ * @ipa: IPA pointer
+ * @tre_count: Number of elements in the transaction
+ *
+ * @Return: A GSI transaction structure, or a null pointer if all
+ * available transactions are in use
+ */
+struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count);
+
+#endif /* _IPA_CMD_H_ */
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
new file mode 100644
index 000000000000..042b5fc3c135
--- /dev/null
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2019-2020 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/* Endpoint configuration for the SC7180 SoC. */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 1,
+ .endpoint_id = 6,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_DMA_ONLY,
+ .config = {
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 6,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .pad_align = ilog2(sizeof(u32)),
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 0,
+ .endpoint_id = 1,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .seq_type =
+ IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 3,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 6,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_COMMAND_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 1,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ },
+ [IPA_ENDPOINT_MODEM_LAN_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 3,
+ .endpoint_id = 13,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 4,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 10,
+ .toward_ipa = false,
+ },
+};
+
+/* For the SC7180, resource groups are allocated this way:
+ * group 0: UL_DL
+ */
+static const struct ipa_resource_src ipa_resource_src[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
+ .limits[0] = {
+ .min = 3,
+ .max = 63,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ .limits[0] = {
+ .min = 3,
+ .max = 3,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ .limits[0] = {
+ .min = 10,
+ .max = 10,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ .limits[0] = {
+ .min = 1,
+ .max = 1,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+ .limits[0] = {
+ .min = 5,
+ .max = 5,
+ },
+ },
+};
+
+static const struct ipa_resource_dst ipa_resource_dst[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
+ .limits[0] = {
+ .min = 3,
+ .max = 3,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+ .limits[0] = {
+ .min = 1,
+ .max = 63,
+ },
+ },
+};
+
+/* Resource configuration for the SC7180 SoC. */
+static const struct ipa_resource_data ipa_resource_data = {
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region configuration for the SC7180 SoC. */
+static const struct ipa_mem ipa_mem_data[] = {
+ [IPA_MEM_UC_SHARED] = {
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_INFO] = {
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER_HASHED] = {
+ .offset = 0x0288,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER] = {
+ .offset = 0x0290,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER_HASHED] = {
+ .offset = 0x0310,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER] = {
+ .offset = 0x0318,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE_HASHED] = {
+ .offset = 0x0398,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE] = {
+ .offset = 0x03a0,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE_HASHED] = {
+ .offset = 0x0420,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE] = {
+ .offset = 0x0428,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_MODEM_HEADER] = {
+ .offset = 0x04a8,
+ .size = 0x0140,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_HEADER] = {
+ .offset = 0x05e8,
+ .size = 0x0000,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM_PROC_CTX] = {
+ .offset = 0x05f0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_PROC_CTX] = {
+ .offset = 0x07f0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_PDN_CONFIG] = {
+ .offset = 0x09f8,
+ .size = 0x0050,
+ .canary_count = 2,
+ },
+ [IPA_MEM_STATS_QUOTA] = {
+ .offset = 0x0a50,
+ .size = 0x0060,
+ .canary_count = 2,
+ },
+ [IPA_MEM_STATS_TETHERING] = {
+ .offset = 0x0ab0,
+ .size = 0x0140,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_DROP] = {
+ .offset = 0x0bf0,
+ .size = 0,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM] = {
+ .offset = 0x0bf0,
+ .size = 0x140c,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_EVENT_RING] = {
+ .offset = 0x2000,
+ .size = 0,
+ .canary_count = 1,
+ },
+};
+
+/* Configuration data for the SC7180 SoC. */
+const struct ipa_data ipa_data_sc7180 = {
+ .version = IPA_VERSION_4_2,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_count = ARRAY_SIZE(ipa_mem_data),
+ .mem_data = ipa_mem_data,
+};
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
new file mode 100644
index 000000000000..0d9c36e1e806
--- /dev/null
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/* Endpoint configuration for the SDM845 SoC. */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 4,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_DMA_ONLY,
+ .config = {
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 5,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .checksum = true,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .pad_align = ilog2(sizeof(u32)),
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 3,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .seq_type =
+ IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ .delay = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 6,
+ .endpoint_id = 10,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_COMMAND_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 1,
+ .endpoint_id = 4,
+ .toward_ipa = true,
+ },
+ [IPA_ENDPOINT_MODEM_LAN_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 3,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_LAN_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 3,
+ .endpoint_id = 13,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 4,
+ .endpoint_id = 6,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 12,
+ .toward_ipa = false,
+ },
+};
+
+/* For the SDM845, resource groups are allocated this way:
+ * group 0: LWA_DL
+ * group 1: UL_DL
+ */
+static const struct ipa_resource_src ipa_resource_src[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
+ .limits[0] = {
+ .min = 1,
+ .max = 63,
+ },
+ .limits[1] = {
+ .min = 1,
+ .max = 63,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ .limits[0] = {
+ .min = 10,
+ .max = 10,
+ },
+ .limits[1] = {
+ .min = 10,
+ .max = 10,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ .limits[0] = {
+ .min = 12,
+ .max = 12,
+ },
+ .limits[1] = {
+ .min = 14,
+ .max = 14,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ .limits[0] = {
+ .min = 0,
+ .max = 63,
+ },
+ .limits[1] = {
+ .min = 0,
+ .max = 63,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+ .limits[0] = {
+ .min = 14,
+ .max = 14,
+ },
+ .limits[1] = {
+ .min = 20,
+ .max = 20,
+ },
+ },
+};
+
+static const struct ipa_resource_dst ipa_resource_dst[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
+ .limits[0] = {
+ .min = 4,
+ .max = 4,
+ },
+ .limits[1] = {
+ .min = 4,
+ .max = 4,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+ .limits[0] = {
+ .min = 2,
+ .max = 63,
+ },
+ .limits[1] = {
+ .min = 1,
+ .max = 63,
+ },
+ },
+};
+
+/* Resource configuration for the SDM845 SoC. */
+static const struct ipa_resource_data ipa_resource_data = {
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region configuration for the SDM845 SoC. */
+static const struct ipa_mem ipa_mem_data[] = {
+ [IPA_MEM_UC_SHARED] = {
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_INFO] = {
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_V4_FILTER_HASHED] = {
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER] = {
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER_HASHED] = {
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER] = {
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE_HASHED] = {
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE] = {
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE_HASHED] = {
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE] = {
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_MODEM_HEADER] = {
+ .offset = 0x0688,
+ .size = 0x0140,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_HEADER] = {
+ .offset = 0x07c8,
+ .size = 0x0000,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM_PROC_CTX] = {
+ .offset = 0x07d0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_PROC_CTX] = {
+ .offset = 0x09d0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM] = {
+ .offset = 0x0bd8,
+ .size = 0x1024,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_EVENT_RING] = {
+ .offset = 0x1c00,
+ .size = 0x0400,
+ .canary_count = 1,
+ },
+};
+
+/* Configuration data for the SDM845 SoC. */
+const struct ipa_data ipa_data_sdm845 = {
+ .version = IPA_VERSION_3_5_1,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_count = ARRAY_SIZE(ipa_mem_data),
+ .mem_data = ipa_mem_data,
+};
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
new file mode 100644
index 000000000000..7110de2de817
--- /dev/null
+++ b/drivers/net/ipa/ipa_data.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_DATA_H_
+#define _IPA_DATA_H_
+
+#include <linux/types.h>
+
+#include "ipa_version.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/**
+ * DOC: IPA/GSI Configuration Data
+ *
+ * Boot-time configuration data is used to define the configuration of the
+ * IPA and GSI resources to use for a given platform. This data is supplied
+ * via the Device Tree match table, associated with a particular compatible
+ * string. The data defines information about resources, endpoints, and
+ * channels.
+ *
+ * Resources are data structures used internally by the IPA hardware. The
+ * configuration data defines the number (or limits of the number) of various
+ * types of these resources.
+ *
+ * Endpoint configuration data defines properties of both IPA endpoints and
+ * GSI channels. A channel is a GSI construct, and represents a single
+ * communication path between the IPA and a particular execution environment
+ * (EE), such as the AP or Modem. Each EE has a set of channels associated
+ * with it, and each channel has an ID unique for that EE. For the most part
+ * the only GSI channels of concern to this driver belong to the AP.
+ *
+ * An endpoint is an IPA construct representing a single channel anywhere
+ * in the system. An IPA endpoint ID maps directly to an (EE, channel_id)
+ * pair. Generally, this driver is concerned with only endpoints associated
+ * with the AP, however this will change when support for routing (etc.) is
+ * added. IPA endpoint and GSI channel configuration data are defined
+ * together, establishing the endpoint_id->(EE, channel_id) mapping.
+ *
+ * Endpoint configuration data consists of three parts: properties that
+ * are common to IPA and GSI (EE ID, channel ID, endpoint ID, and direction);
+ * properties associated with the GSI channel; and properties associated with
+ * the IPA endpoint.
+ */
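+
+/* As a concrete example, in the SDM845 configuration (ipa_data-sdm845.c)
+ * the AP command TX endpoint has endpoint_id 5 and uses AP channel 4, so
+ * IPA endpoint 5 maps to the (GSI_EE_AP, 4) pair.
+ */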
+
+/* The maximum value returned by ipa_resource_group_count() */
+#define IPA_RESOURCE_GROUP_COUNT 4
+
+/**
+ * struct gsi_channel_data - GSI channel configuration data
+ * @tre_count: number of TREs in the channel ring
+ * @event_count: number of slots in the associated event ring
+ * @tlv_count: number of entries in channel's TLV FIFO
+ *
+ * A GSI channel is a unidirectional means of transferring data to or
+ * from (and through) the IPA. A GSI channel has a ring buffer made
+ * up of "transfer elements" (TREs) that specify individual data transfers
+ * or IPA immediate commands. TREs are filled by the AP, and control
+ * is passed to IPA hardware by writing the last written element
+ * into a doorbell register.
+ *
+ * When data transfer commands have completed the GSI generates an
+ * event (a structure of data) and optionally signals the AP with
+ * an interrupt. Event structures are implemented by another ring
+ * buffer, directed toward the AP from the IPA.
+ *
+ * The input to a GSI channel is a FIFO of type/length/value (TLV)
+ * elements, and the size of this FIFO limits the number of TREs
+ * that can be included in a single transaction.
+ */
+struct gsi_channel_data {
+ u16 tre_count;
+ u16 event_count;
+ u8 tlv_count;
+};
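+
+/* For example, the SDM845 AP command channel (see ipa_data-sdm845.c) is
+ * configured with tre_count = 512, event_count = 256, and tlv_count = 20,
+ * so one command transaction on that channel can hold at most 20 TREs.
+ */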
+
+/**
+ * struct ipa_endpoint_tx_data - configuration data for TX endpoints
+ * @status_endpoint: endpoint to which status elements are sent
+ * @delay: whether endpoint starts in delay mode
+ *
+ * Delay mode prevents a TX endpoint from transmitting anything, even if
+ * commands have been presented to the hardware. Once the endpoint exits
+ * delay mode, queued transfer commands are sent.
+ *
+ * The @status_endpoint is only valid if the endpoint's @status_enable
+ * flag is set.
+ */
+struct ipa_endpoint_tx_data {
+ enum ipa_endpoint_name status_endpoint;
+ bool delay;
+};
+
+/**
+ * struct ipa_endpoint_rx_data - configuration data for RX endpoints
+ * @pad_align: power-of-2 boundary to which packet payload is aligned
+ * @aggr_close_eof: whether aggregation closes on end-of-frame
+ *
+ * With each packet it transfers, the IPA hardware can perform certain
+ * transformations of its packet data. One of these is adding pad bytes
+ * to the end of the packet data so the result ends on a power-of-2 boundary.
+ *
+ * It is also able to aggregate multiple packets into a single receive buffer.
+ * Aggregation is "open" while a buffer is being filled, and "closes" when
+ * certain criteria are met. One of those criteria is the sender indicating
+ * a "frame" consisting of several transfers has ended.
+ */
+struct ipa_endpoint_rx_data {
+ u32 pad_align;
+ bool aggr_close_eof;
+};
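+
+/* For example, both SoCs supported here set pad_align to ilog2(sizeof(u32))
+ * for the AP LAN RX endpoint, so payloads are padded to a 4-byte boundary.
+ */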
+
+/**
+ * struct ipa_endpoint_config_data - IPA endpoint hardware configuration
+ * @checksum: whether checksum offload is enabled
+ * @qmap: whether endpoint uses QMAP protocol
+ * @aggregation: whether endpoint supports aggregation
+ * @status_enable: whether endpoint uses status elements
+ * @dma_mode: whether endpoint operates in DMA mode
+ * @dma_endpoint: peer endpoint, if operating in DMA mode
+ * @tx: TX-specific endpoint information (see above)
+ * @rx: RX-specific endpoint information (see above)
+ */
+struct ipa_endpoint_config_data {
+ bool checksum;
+ bool qmap;
+ bool aggregation;
+ bool status_enable;
+ bool dma_mode;
+ enum ipa_endpoint_name dma_endpoint;
+ union {
+ struct ipa_endpoint_tx_data tx;
+ struct ipa_endpoint_rx_data rx;
+ };
+};
+
+/**
+ * struct ipa_endpoint_data - IPA endpoint configuration data
+ * @filter_support: whether endpoint supports filtering
+ * @seq_type: hardware sequencer type used for endpoint
+ * @config: hardware configuration (see above)
+ *
+ * Not all endpoints support the IPA filtering capability. A filter table
+ * defines the filters to apply for those endpoints that support it. The
+ * AP is responsible for initializing this table, and it must include entries
+ * for non-AP endpoints. For this reason we define *all* endpoints used
+ * in the system, and indicate whether they support filtering.
+ *
+ * The remaining endpoint configuration data applies only to AP endpoints.
+ * The IPA hardware is implemented by sequencers, and the AP must program
+ * the type(s) of these sequencers at initialization time. The remaining
+ * endpoint configuration data is defined above.
+ */
+struct ipa_endpoint_data {
+ bool filter_support;
+ /* The next two are specified only for AP endpoints */
+ enum ipa_seq_type seq_type;
+ struct ipa_endpoint_config_data config;
+};
+
+/**
+ * struct ipa_gsi_endpoint_data - GSI channel/IPA endpoint data
+ * @ee_id: GSI execution environment ID
+ * @channel_id: GSI channel ID
+ * @endpoint_id: IPA endpoint ID
+ * @toward_ipa: direction of data transfer
+ * @channel: GSI channel configuration data (see above)
+ * @endpoint: IPA endpoint configuration data (see above)
+ */
+struct ipa_gsi_endpoint_data {
+ u8 ee_id; /* enum gsi_ee_id */
+ u8 channel_id;
+ u8 endpoint_id;
+ bool toward_ipa;
+
+ struct gsi_channel_data channel;
+ struct ipa_endpoint_data endpoint;
+};
+
+/** enum ipa_resource_type_src - source resource types */
+enum ipa_resource_type_src {
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+};
+
+/** enum ipa_resource_type_dst - destination resource types */
+enum ipa_resource_type_dst {
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/**
+ * struct ipa_resource_limits - minimum and maximum resource counts
+ * @min: minimum number of resources of a given type
+ * @max: maximum number of resources of a given type
+ */
+struct ipa_resource_limits {
+ u32 min;
+ u32 max;
+};
+
+/**
+ * struct ipa_resource_src - source endpoint group resource usage
+ * @type: source group resource type
+ * @limits: array of limits to use for each resource group
+ */
+struct ipa_resource_src {
+ enum ipa_resource_type_src type;
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+};
+
+/**
+ * struct ipa_resource_dst - destination endpoint group resource usage
+ * @type: destination group resource type
+ * @limits: array of limits to use for each resource group
+ */
+struct ipa_resource_dst {
+ enum ipa_resource_type_dst type;
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+};
+
+/**
+ * struct ipa_resource_data - IPA resource configuration data
+ * @resource_src_count: number of entries in the resource_src array
+ * @resource_src: source endpoint group resources
+ * @resource_dst_count: number of entries in the resource_dst array
+ * @resource_dst: destination endpoint group resources
+ *
+ * In order to manage quality of service between endpoints, certain resources
+ * required for operation are allocated to groups of endpoints. Generally
+ * this information is invisible to the AP, but the AP is responsible for
+ * programming it at initialization time, so we specify it here.
+ */
+struct ipa_resource_data {
+ u32 resource_src_count;
+ const struct ipa_resource_src *resource_src;
+ u32 resource_dst_count;
+ const struct ipa_resource_dst *resource_dst;
+};
+
+/**
+ * struct ipa_mem_data - IPA-local memory region description
+ * @offset: offset in IPA memory space to base of the region
+ * @size: size in bytes of the region
+ * @canary_count: number of 32-bit "canary" values that precede region
+ */
+struct ipa_mem_data {
+ u32 offset;
+ u16 size;
+ u16 canary_count;
+};
+
+/**
+ * struct ipa_data - combined IPA/GSI configuration data
+ * @version: IPA hardware version
+ * @endpoint_count: number of entries in endpoint_data array
+ * @endpoint_data: IPA endpoint/GSI channel data
+ * @resource_data: IPA resource configuration data
+ * @mem_count: number of entries in mem_data array
+ * @mem_data: IPA-local shared memory region data
+ */
+struct ipa_data {
+ enum ipa_version version;
+ u32 endpoint_count; /* # entries in endpoint_data[] */
+ const struct ipa_gsi_endpoint_data *endpoint_data;
+ const struct ipa_resource_data *resource_data;
+ u32 mem_count; /* # entries in mem_data[] */
+ const struct ipa_mem *mem_data;
+};
+
+extern const struct ipa_data ipa_data_sdm845;
+extern const struct ipa_data ipa_data_sc7180;
+
+#endif /* _IPA_DATA_H_ */
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
new file mode 100644
index 000000000000..915b4cd05dd2
--- /dev/null
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -0,0 +1,1707 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+#include <linux/if_rmnet.h>
+#include <linux/version.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+#include "ipa_modem.h"
+#include "ipa_table.h"
+#include "ipa_gsi.h"
+
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+
+#define IPA_REPLENISH_BATCH 16
+
+#define IPA_RX_BUFFER_SIZE (PAGE_SIZE << IPA_RX_BUFFER_ORDER)
+#define IPA_RX_BUFFER_ORDER 1 /* 8KB endpoint RX buffers (2 pages) */
+
+/* The amount of RX buffer space consumed by standard skb overhead */
+#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
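+/* (SKB_MAX_ORDER(NET_SKB_PAD, 0) is the usable data size of an order-0
+ * allocation once NET_SKB_PAD and the shared info structure are accounted
+ * for, so the difference above is the per-buffer skb overhead.)
+ */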
+
+#define IPA_ENDPOINT_STOP_RX_RETRIES 10
+#define IPA_ENDPOINT_STOP_RX_SIZE 1 /* bytes */
+
+#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
+#define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */
+
+#define ENDPOINT_STOP_DMA_TIMEOUT 15 /* milliseconds */
+
+/** enum ipa_status_opcode - status element opcode hardware values */
+enum ipa_status_opcode {
+ IPA_STATUS_OPCODE_PACKET = 0x01,
+ IPA_STATUS_OPCODE_NEW_FRAG_RULE = 0x02,
+ IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
+ IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
+ IPA_STATUS_OPCODE_LOG = 0x10,
+ IPA_STATUS_OPCODE_DCMP = 0x20,
+ IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40,
+};
+
+/** enum ipa_status_exception - status element exception type */
+enum ipa_status_exception {
+ /* 0 means no exception */
+ IPA_STATUS_EXCEPTION_DEAGGR = 0x01,
+ IPA_STATUS_EXCEPTION_IPTYPE = 0x04,
+ IPA_STATUS_EXCEPTION_PACKET_LENGTH = 0x08,
+ IPA_STATUS_EXCEPTION_FRAG_RULE_MISS = 0x10,
+ IPA_STATUS_EXCEPTION_SW_FILT = 0x20,
+ /* The meaning of the next value depends on the IP version */
+ IPA_STATUS_EXCEPTION_NAT = 0x40, /* IPv4 */
+ IPA_STATUS_EXCEPTION_IPV6CT = IPA_STATUS_EXCEPTION_NAT,
+};
+
+/* Status element provided by hardware */
+struct ipa_status {
+ u8 opcode; /* enum ipa_status_opcode */
+ u8 exception; /* enum ipa_status_exception */
+ __le16 mask;
+ __le16 pkt_len;
+ u8 endp_src_idx;
+ u8 endp_dst_idx;
+ __le32 metadata;
+ __le32 flags1;
+ __le64 flags2;
+ __le32 flags3;
+ __le32 flags4;
+};
+
+/* Field masks for struct ipa_status structure fields */
+
+#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
+
+#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
+
+#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK GENMASK(0, 0)
+#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK GENMASK(1, 1)
+#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK GENMASK(2, 2)
+#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK GENMASK(3, 3)
+#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK GENMASK(13, 4)
+#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK GENMASK(14, 14)
+#define IPA_STATUS_FLAGS1_RT_HASH_FMASK GENMASK(15, 15)
+#define IPA_STATUS_FLAGS1_UCP_FMASK GENMASK(16, 16)
+#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK GENMASK(21, 17)
+#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
+
+#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK GENMASK_ULL(0, 0)
+#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK GENMASK_ULL(13, 1)
+#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK GENMASK_ULL(15, 14)
+#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK GENMASK_ULL(63, 16)
+
+#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK GENMASK(7, 0)
+#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK GENMASK(31, 8)
+
+#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK GENMASK(0, 0)
+#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK GENMASK(10, 1)
+#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK GENMASK(11, 11)
+#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK GENMASK(15, 12)
+#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK GENMASK(31, 16)
+
+#ifdef IPA_VALIDATE
+
+static void ipa_endpoint_validate_build(void)
+{
+ /* The aggregation byte limit defines the point at which an
+ * aggregation window will close. It is programmed into the
+ * IPA hardware as a number of KB. We don't use "hard byte
+ * limit" aggregation, which means that we need to supply
+ * enough space in a receive buffer to hold a complete MTU
+ * plus normal skb overhead *after* that aggregation byte
+ * limit has been crossed.
+ *
+ * This check just ensures we don't define a receive buffer
+ * size that would exceed what we can represent in the field
+ * that is used to program its size.
+ */
+ BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
+ field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
+ IPA_MTU + IPA_RX_BUFFER_OVERHEAD);
+
+ /* I honestly don't know where this requirement comes from. But
+ * it holds, and if we someday need to loosen the constraint we
+ * can try to track it down.
+ */
+ BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
+}
+
+static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *all_data,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ const struct ipa_gsi_endpoint_data *other_data;
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_endpoint_name other_name;
+
+ if (ipa_gsi_endpoint_data_empty(data))
+ return true;
+
+ if (!data->toward_ipa) {
+ if (data->endpoint.filter_support) {
+ dev_err(dev, "filtering not supported for "
+ "RX endpoint %u\n",
+ data->endpoint_id);
+ return false;
+ }
+
+ return true; /* Nothing more to check for RX */
+ }
+
+ if (data->endpoint.config.status_enable) {
+ other_name = data->endpoint.config.tx.status_endpoint;
+ if (other_name >= count) {
+ dev_err(dev, "status endpoint name %u out of range "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ /* Status endpoint must be defined... */
+ other_data = &all_data[other_name];
+ if (ipa_gsi_endpoint_data_empty(other_data)) {
+ dev_err(dev, "DMA endpoint name %u undefined "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ /* ...and has to be an RX endpoint... */
+ if (other_data->toward_ipa) {
+ dev_err(dev,
+ "status endpoint for endpoint %u not RX\n",
+ data->endpoint_id);
+ return false;
+ }
+
+ /* ...and if it's to be an AP endpoint... */
+ if (other_data->ee_id == GSI_EE_AP) {
+ /* ...make sure it has status enabled. */
+ if (!other_data->endpoint.config.status_enable) {
+ dev_err(dev,
+ "status not enabled for endpoint %u\n",
+ other_data->endpoint_id);
+ return false;
+ }
+ }
+ }
+
+ if (data->endpoint.config.dma_mode) {
+ other_name = data->endpoint.config.dma_endpoint;
+ if (other_name >= count) {
+ dev_err(dev, "DMA endpoint name %u out of range "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ other_data = &all_data[other_name];
+ if (ipa_gsi_endpoint_data_empty(other_data)) {
+ dev_err(dev, "DMA endpoint name %u undefined "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ const struct ipa_gsi_endpoint_data *dp = data;
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_endpoint_name name;
+
+ ipa_endpoint_validate_build();
+
+ if (count > IPA_ENDPOINT_COUNT) {
+ dev_err(dev, "too many endpoints specified (%u > %u)\n",
+ count, IPA_ENDPOINT_COUNT);
+ return false;
+ }
+
+ /* Make sure needed endpoints have defined data */
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
+ dev_err(dev, "command TX endpoint not defined\n");
+ return false;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
+ dev_err(dev, "LAN RX endpoint not defined\n");
+ return false;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
+ dev_err(dev, "AP->modem TX endpoint not defined\n");
+ return false;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
+ dev_err(dev, "AP<-modem RX endpoint not defined\n");
+ return false;
+ }
+
+ for (name = 0; name < count; name++, dp++)
+ if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
+ return false;
+
+ return true;
+}
+
+#else /* !IPA_VALIDATE */
+
+static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+/* Allocate a transaction to use on a non-command endpoint */
+static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
+ u32 tre_count)
+{
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ u32 channel_id = endpoint->channel_id;
+ enum dma_data_direction direction;
+
+ direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
+}
+
+/* suspend_delay represents suspend for RX, delay for TX endpoints.
+ * Note that suspend is not supported starting with IPA v4.0.
+ */
+static int
+ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
+{
+ u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ u32 mask;
+ u32 val;
+
+ /* assert(ipa->version == IPA_VERSION_3_5_1); */
+ mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
+
+ val = ioread32(ipa->reg_virt + offset);
+ if (suspend_delay == !!(val & mask))
+ return -EALREADY; /* Already set to desired state */
+
+ val ^= mask;
+ iowrite32(val, ipa->reg_virt + offset);
+
+ return 0;
+}
+
+/* Enable or disable delay or suspend mode on all modem endpoints */
+void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
+{
+ bool support_suspend;
+ u32 endpoint_id;
+
+ /* DELAY mode doesn't work right on IPA v4.2 */
+ if (ipa->version == IPA_VERSION_4_2)
+ return;
+
+ /* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
+ support_suspend = ipa->version == IPA_VERSION_3_5_1;
+
+ for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
+
+ if (endpoint->ee_id != GSI_EE_MODEM)
+ continue;
+
+ /* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
+ if (endpoint->toward_ipa || support_suspend)
+ (void)ipa_endpoint_init_ctrl(endpoint, enable);
+ }
+}
+
+/* Reset all modem endpoints to use the default exception endpoint */
+int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
+{
+ u32 initialized = ipa->initialized;
+ struct gsi_trans *trans;
+ u32 count;
+
+ /* We need one command per modem TX endpoint. We can get an upper
+ * bound on that by assuming all initialized endpoints are modem->IPA.
+ * That won't happen, and we could be more precise, but this is fine
+ * for now. We need to end the transaction with a "tag process."
+ */
+ count = hweight32(initialized) + ipa_cmd_tag_process_count();
+ trans = ipa_cmd_trans_alloc(ipa, count);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction to reset modem exception endpoints\n");
+ return -EBUSY;
+ }
+
+ while (initialized) {
+ u32 endpoint_id = __ffs(initialized);
+ struct ipa_endpoint *endpoint;
+ u32 offset;
+
+ initialized ^= BIT(endpoint_id);
+
+ /* We only reset modem TX endpoints */
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
+ continue;
+
+ offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+
+ /* Value written is 0, and all bits are updated. That
+ * means status is disabled on the endpoint, and as a
+ * result all other fields in the register are ignored.
+ */
+ ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
+ }
+
+ ipa_cmd_tag_process_add(trans);
+
+ /* XXX This should have a 1 second timeout */
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ /* FRAG_OFFLOAD_EN is 0 */
+ if (endpoint->data->checksum) {
+ if (endpoint->toward_ipa) {
+ u32 checksum_offset;
+
+ val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
+ CS_OFFLOAD_EN_FMASK);
+ /* Checksum header offset is in 4-byte units */
+ checksum_offset = sizeof(struct rmnet_map_header);
+ checksum_offset /= sizeof(u32);
+ val |= u32_encode_bits(checksum_offset,
+ CS_METADATA_HDR_OFFSET_FMASK);
+ } else {
+ val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
+ CS_OFFLOAD_EN_FMASK);
+ }
+ } else {
+ val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
+ CS_OFFLOAD_EN_FMASK);
+ }
+ /* CS_GEN_QMB_MASTER_SEL is 0 */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ if (endpoint->data->qmap) {
+ size_t header_size = sizeof(struct rmnet_map_header);
+
+ if (endpoint->toward_ipa && endpoint->data->checksum)
+ header_size += sizeof(struct rmnet_map_ul_csum_header);
+
+ val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
+ /* metadata is the 4 byte rmnet_map header itself */
+ val |= HDR_OFST_METADATA_VALID_FMASK;
+ val |= u32_encode_bits(0, HDR_OFST_METADATA_FMASK);
+ /* HDR_ADDITIONAL_CONST_LEN is 0; (IPA->AP only) */
+ if (!endpoint->toward_ipa) {
+ u32 size_offset = offsetof(struct rmnet_map_header,
+ pkt_len);
+
+ val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
+ val |= u32_encode_bits(size_offset,
+ HDR_OFST_PKT_SIZE_FMASK);
+ }
+ /* HDR_A5_MUX is 0 */
+ /* HDR_LEN_INC_DEAGG_HDR is 0 */
+ /* HDR_METADATA_REG_VALID is 0; (AP->IPA only) */
+ }
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
+ u32 pad_align = endpoint->data->rx.pad_align;
+ u32 val = 0;
+
+ val |= HDR_ENDIANNESS_FMASK; /* big endian */
+ val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
+ /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
+ /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
+ /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ if (!endpoint->toward_ipa)
+ val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+/**
+ * ipa_rmnet_mux_id_metadata_mask() - Build the metadata mask for mux_id
+ *
+ * Generate a metadata mask value that will select only the mux_id
+ * field in an rmnet_map header structure. The mux_id is at offset
+ * 1 byte from the beginning of the structure, but the metadata
+ * value is treated as a 4-byte unit. So this mask must be computed
+ * with endianness in mind. Note that ipa_endpoint_init_hdr_metadata_mask()
+ * will convert this value to the proper byte order.
+ *
+ * Marked __always_inline because this is really computing a
+ * constant value.
+ */
+static __always_inline __be32 ipa_rmnet_mux_id_metadata_mask(void)
+{
+ size_t mux_id_offset = offsetof(struct rmnet_map_header, mux_id);
+ u32 mux_id_mask = 0;
+ u8 *bytes;
+
+ bytes = (u8 *)&mux_id_mask;
+ bytes[mux_id_offset] = 0xff; /* mux_id is 1 byte */
+
+ return cpu_to_be32(mux_id_mask);
+}
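+
+/* For illustration: with mux_id at byte offset 1, the function above
+ * always yields the value 0x00ff0000, a mask that selects the second
+ * byte of the big-endian 4-byte metadata value, regardless of host
+ * byte order.
+ */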
+
+static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 val = 0;
+ u32 offset;
+
+ offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
+
+ if (!endpoint->toward_ipa && endpoint->data->qmap)
+ val = ipa_rmnet_mux_id_metadata_mask();
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
+ u32 val;
+
+ if (endpoint->toward_ipa && endpoint->data->dma_mode) {
+ enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
+ u32 dma_endpoint_id;
+
+ dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
+
+ val = u32_encode_bits(IPA_DMA, MODE_FMASK);
+ val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
+ } else {
+ val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
+ }
+ /* Other bitfields unspecified (and 0) */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+/* Compute the aggregation size value to use for a given buffer size */
+static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
+{
+ /* We don't use "hard byte limit" aggregation, so we define the
+ * aggregation limit such that our buffer has enough space *after*
+ * that limit to receive a full MTU of data, plus overhead.
+ */
+ rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+
+ return rx_buffer_size / SZ_1K;
+}
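+
+/* A worked example, with illustrative numbers only: given an 8192-byte
+ * receive buffer, a 1500-byte MTU and 500 bytes of overhead, this
+ * computes (8192 - 2000) / 1024 = 6, so aggregation closes at 6 KB and
+ * at least MTU + overhead bytes of buffer space remain past the limit.
+ */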
+
+static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ if (endpoint->data->aggregation) {
+ if (!endpoint->toward_ipa) {
+ u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+ u32 limit;
+
+ val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
+ val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
+ val |= u32_encode_bits(aggr_size,
+ AGGR_BYTE_LIMIT_FMASK);
+ limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
+ val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
+ AGGR_TIME_LIMIT_FMASK);
+ val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
+ if (endpoint->data->rx.aggr_close_eof)
+ val |= AGGR_SW_EOF_ACTIVE_FMASK;
+ /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
+ } else {
+ val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
+ AGGR_EN_FMASK);
+ val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
+ /* other fields ignored */
+ }
+ /* AGGR_FORCE_CLOSE is 0 */
+ } else {
+ val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
+ /* other fields ignored */
+ }
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+/* A return value of 0 indicates an error */
+static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
+{
+ u32 scale;
+ u32 base;
+ u32 val;
+
+ if (!microseconds)
+ return 0; /* invalid delay */
+
+ /* Timer is represented in units of clock ticks. */
+ if (ipa->version < IPA_VERSION_4_2)
+ return microseconds; /* XXX Needs to be computed */
+
+ /* IPA v4.2 represents the tick count as base * scale */
+ scale = 1; /* XXX Needs to be computed */
+ if (scale > field_max(SCALE_FMASK))
+ return 0; /* scale too big */
+
+ base = DIV_ROUND_CLOSEST(microseconds, scale);
+ if (base > field_max(BASE_VALUE_FMASK))
+ return 0; /* microseconds too big */
+
+ val = u32_encode_bits(scale, SCALE_FMASK);
+ val |= u32_encode_bits(base, BASE_VALUE_FMASK);
+
+ return val;
+}
+
+static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
+ u32 microseconds)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ u32 offset;
+ u32 val;
+
+ /* XXX We'll fix this when the register definition is clear */
+ if (microseconds) {
+ struct device *dev = &ipa->pdev->dev;
+
+ dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
+ endpoint_id);
+ microseconds = 0;
+ }
+
+ if (microseconds) {
+ val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
+ if (!val)
+ return -EINVAL;
+ } else {
+ val = 0; /* timeout is immediate */
+ }
+ offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
+ iowrite32(val, ipa->reg_virt + offset);
+
+ return 0;
+}
+
+static void
+ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 offset;
+ u32 val;
+
+ val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
+ offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
+{
+ u32 i;
+
+ for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[i];
+
+ if (endpoint->ee_id != GSI_EE_MODEM)
+ continue;
+
+ (void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
+ ipa_endpoint_init_hol_block_enable(endpoint, true);
+ }
+}
+
+static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ /* DEAGGR_HDR_LEN is 0 */
+ /* PACKET_OFFSET_VALID is 0 */
+ /* PACKET_OFFSET_LOCATION is ignored (not valid) */
+ /* MAX_PACKET_LEN is 0 (not enforced) */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
+ u32 seq_type = endpoint->seq_type;
+ u32 val = 0;
+
+ val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
+ val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
+ /* HPS_REP_SEQ_TYPE is 0 */
+ /* DPS_REP_SEQ_TYPE is 0 */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
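+
+/* For example (an illustrative value, not taken from config data): a
+ * seq_type of 0x21 programs HPS sequencer type 0x1 from the low nibble
+ * and DPS sequencer type 0x2 from the next nibble.
+ */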
+
+/**
+ * ipa_endpoint_skb_tx() - Transmit a socket buffer
+ * @endpoint: Endpoint pointer
+ * @skb: Socket buffer to send
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
+{
+ struct gsi_trans *trans;
+ u32 nr_frags;
+ int ret;
+
+ /* Make sure source endpoint's TLV FIFO has enough entries to
+ * hold the linear portion of the skb and all its fragments.
+ * If not, see if we can linearize it before giving up.
+ */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (1 + nr_frags > endpoint->trans_tre_max) {
+ if (skb_linearize(skb))
+ return -E2BIG;
+ nr_frags = 0;
+ }
+
+ trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
+ if (!trans)
+ return -EBUSY;
+
+ ret = gsi_trans_skb_add(trans, skb);
+ if (ret)
+ goto err_trans_free;
+ trans->data = skb; /* transaction owns skb now */
+
+ gsi_trans_commit(trans, !netdev_xmit_more());
+
+ return 0;
+
+err_trans_free:
+ gsi_trans_free(trans);
+
+ return -ENOMEM;
+}
+
+static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ u32 val = 0;
+ u32 offset;
+
+ offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+
+ if (endpoint->data->status_enable) {
+ val |= STATUS_EN_FMASK;
+ if (endpoint->toward_ipa) {
+ enum ipa_endpoint_name name;
+ u32 status_endpoint_id;
+
+ name = endpoint->data->tx.status_endpoint;
+ status_endpoint_id = ipa->name_map[name]->endpoint_id;
+
+ val |= u32_encode_bits(status_endpoint_id,
+ STATUS_ENDP_FMASK);
+ }
+ /* STATUS_LOCATION is 0 (status element precedes packet) */
+ /* The next field is present for IPA v4.0 and above */
+ /* STATUS_PKT_SUPPRESS_FMASK is 0 */
+ }
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
+{
+ struct gsi_trans *trans;
+ bool doorbell = false;
+ struct page *page;
+ u32 offset;
+ u32 len;
+ int ret;
+
+ page = dev_alloc_pages(IPA_RX_BUFFER_ORDER);
+ if (!page)
+ return -ENOMEM;
+
+ trans = ipa_endpoint_trans_alloc(endpoint, 1);
+ if (!trans)
+ goto err_free_pages;
+
+ /* Offset the buffer to make space for skb headroom */
+ offset = NET_SKB_PAD;
+ len = IPA_RX_BUFFER_SIZE - offset;
+
+ ret = gsi_trans_page_add(trans, page, len, offset);
+ if (ret)
+ goto err_trans_free;
+ trans->data = page; /* transaction owns page now */
+
+ if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
+ doorbell = true;
+ endpoint->replenish_ready = 0;
+ }
+
+ gsi_trans_commit(trans, doorbell);
+
+ return 0;
+
+err_trans_free:
+ gsi_trans_free(trans);
+err_free_pages:
+ __free_pages(page, IPA_RX_BUFFER_ORDER);
+
+ return -ENOMEM;
+}
+
+/**
+ * ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * @endpoint: Endpoint to be replenished
+ * @count: Number of newly available buffers to add to the backlog
+ *
+ * Allocate RX packet wrapper structures with maximal socket buffers
+ * for an endpoint. These are supplied to the hardware, which fills
+ * them with incoming data.
+ */
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
+{
+ struct gsi *gsi;
+ u32 backlog;
+
+ if (!endpoint->replenish_enabled) {
+ if (count)
+ atomic_add(count, &endpoint->replenish_saved);
+ return;
+ }
+
+ while (atomic_dec_not_zero(&endpoint->replenish_backlog))
+ if (ipa_endpoint_replenish_one(endpoint))
+ goto try_again_later;
+ if (count)
+ atomic_add(count, &endpoint->replenish_backlog);
+
+ return;
+
+try_again_later:
+ /* The last one didn't succeed, so fix the backlog */
+ backlog = atomic_inc_return(&endpoint->replenish_backlog);
+
+ if (count)
+ atomic_add(count, &endpoint->replenish_backlog);
+
+ /* Whenever a receive buffer transaction completes we'll try to
+ * replenish again. It's unlikely, but if we fail to supply even
+ * one buffer, nothing will trigger another replenish attempt.
+ * Receive buffer transactions use one TRE, so schedule work to
+ * try replenishing again if our backlog is *all* available TREs.
+ */
+ gsi = &endpoint->ipa->gsi;
+ if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
+ schedule_delayed_work(&endpoint->replenish_work,
+ msecs_to_jiffies(1));
+}
+
+static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
+{
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ u32 max_backlog;
+ u32 saved;
+
+ endpoint->replenish_enabled = true;
+ while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
+ atomic_add(saved, &endpoint->replenish_backlog);
+
+ /* Start replenishing if hardware currently has no buffers */
+ max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
+ if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
+ ipa_endpoint_replenish(endpoint, 0);
+}
+
+static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
+{
+ u32 backlog;
+
+ endpoint->replenish_enabled = false;
+ while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
+ atomic_add(backlog, &endpoint->replenish_saved);
+}
+
+static void ipa_endpoint_replenish_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
+
+ ipa_endpoint_replenish(endpoint, 0);
+}
+
+static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
+ void *data, u32 len, u32 extra)
+{
+ struct sk_buff *skb;
+
+ skb = __dev_alloc_skb(len, GFP_ATOMIC);
+ if (skb) {
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+ skb->truesize += extra;
+ }
+
+ /* Now receive it, or drop it if there's no netdev */
+ if (endpoint->netdev)
+ ipa_modem_skb_rx(endpoint->netdev, skb);
+ else if (skb)
+ dev_kfree_skb_any(skb);
+}
+
+static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
+ struct page *page, u32 len)
+{
+ struct sk_buff *skb;
+
+ /* Nothing to do if there's no netdev */
+ if (!endpoint->netdev)
+ return false;
+
+ /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
+ skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
+ if (skb) {
+ /* Reserve the headroom and account for the data */
+ skb_reserve(skb, NET_SKB_PAD);
+ skb_put(skb, len);
+ }
+
+ /* Receive the buffer (or record drop if unable to build it) */
+ ipa_modem_skb_rx(endpoint->netdev, skb);
+
+ return skb != NULL;
+}
+
+/* The format of a packet status element is the same for several status
+ * types (opcodes). The NEW_FRAG_RULE, LOG, DCMP (decompression) types
+ * aren't currently supported.
+ */
+static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
+{
+ switch (opcode) {
+ case IPA_STATUS_OPCODE_PACKET:
+ case IPA_STATUS_OPCODE_DROPPED_PACKET:
+ case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
+ case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
+ const struct ipa_status *status)
+{
+ u32 endpoint_id;
+
+ if (!ipa_status_format_packet(status->opcode))
+ return true;
+ if (!status->pkt_len)
+ return true;
+ endpoint_id = u32_get_bits(status->endp_dst_idx,
+ IPA_STATUS_DST_IDX_FMASK);
+ if (endpoint_id != endpoint->endpoint_id)
+ return true;
+
+ return false; /* Don't skip this packet, process it */
+}
+
+/* Return whether the status indicates the packet should be dropped */
+static bool ipa_status_drop_packet(const struct ipa_status *status)
+{
+ u32 val;
+
+ /* Deaggregation exceptions we drop; others we consume */
+ if (status->exception)
+ return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
+
+ /* Drop the packet if it fails to match a routing rule; otherwise no */
+ val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
+
+ return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
+}
+
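+/* An illustrative sketch of the receive buffer layout parsed below
+ * (field sizes are examples only): every packet is preceded by a
+ * status element, packet data may be padded to the endpoint's
+ * pad_align value, and a checksum trailer follows only if checksum
+ * offload is enabled:
+ *
+ *   | status | packet data | pad | trailer | status | packet data | ...
+ */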
+static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
+ struct page *page, u32 total_len)
+{
+ void *data = page_address(page) + NET_SKB_PAD;
+ u32 unused = IPA_RX_BUFFER_SIZE - total_len;
+ u32 resid = total_len;
+
+ while (resid) {
+ const struct ipa_status *status = data;
+ u32 align;
+ u32 len;
+
+ if (resid < sizeof(*status)) {
+ dev_err(&endpoint->ipa->pdev->dev,
+ "short message (%u bytes < %zu byte status)\n",
+ resid, sizeof(*status));
+ break;
+ }
+
+ /* Skip over status packets that lack packet data */
+ if (ipa_endpoint_status_skip(endpoint, status)) {
+ data += sizeof(*status);
+ resid -= sizeof(*status);
+ continue;
+ }
+
+ /* Compute the amount of buffer space consumed by the
+ * packet, including the status element. If the hardware
+ * is configured to pad packet data to an aligned boundary,
+ * account for that. And if checksum offload is enabled,
+ * a trailer containing computed checksum information will
+ * be appended.
+ */
+ align = endpoint->data->rx.pad_align ? : 1;
+ len = le16_to_cpu(status->pkt_len);
+ len = sizeof(*status) + ALIGN(len, align);
+ if (endpoint->data->checksum)
+ len += sizeof(struct rmnet_map_dl_csum_trailer);
+
+ /* Charge the new packet with a proportional fraction of
+ * the unused space in the original receive buffer.
+ * XXX Charge a proportion of the *whole* receive buffer?
+ */
+ if (!ipa_status_drop_packet(status)) {
+ u32 extra = unused * len / total_len;
+ void *data2 = data + sizeof(*status);
+ u32 len2 = le16_to_cpu(status->pkt_len);
+
+ /* Client receives only packet data (no status) */
+ ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
+ }
+
+ /* Consume status and the full packet it describes */
+ data += len;
+ resid -= len;
+ }
+}
+
+/* Complete a TX transaction (a command, or one from ipa_endpoint_skb_tx()) */
+static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+}
+
+/* Complete transaction initiated in ipa_endpoint_replenish_one() */
+static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ struct page *page;
+
+ ipa_endpoint_replenish(endpoint, 1);
+
+ if (trans->cancelled)
+ return;
+
+ /* Parse or build a socket buffer using the actual received length */
+ page = trans->data;
+ if (endpoint->data->status_enable)
+ ipa_endpoint_status_parse(endpoint, page, trans->len);
+ else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
+ trans->data = NULL; /* Pages have been consumed */
+}
+
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ if (endpoint->toward_ipa)
+ ipa_endpoint_tx_complete(endpoint, trans);
+ else
+ ipa_endpoint_rx_complete(endpoint, trans);
+}
+
+void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ if (endpoint->toward_ipa) {
+ struct ipa *ipa = endpoint->ipa;
+
+ /* Nothing to do for command transactions */
+ if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
+ struct sk_buff *skb = trans->data;
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ struct page *page = trans->data;
+
+ if (page)
+ __free_pages(page, IPA_RX_BUFFER_ORDER);
+ }
+}
+
+void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
+{
+ u32 val;
+
+ /* ROUTE_DIS is 0 */
+ val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
+ val |= ROUTE_DEF_HDR_TABLE_FMASK;
+ val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
+ val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
+ val |= ROUTE_DEF_RETAIN_HDR_FMASK;
+
+ iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
+}
+
+void ipa_endpoint_default_route_clear(struct ipa *ipa)
+{
+ ipa_endpoint_default_route_set(ipa, 0);
+}
+
+static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
+{
+ u32 mask = BIT(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ u32 offset;
+ u32 val;
+
+ /* assert(mask & ipa->available); */
+ offset = ipa_reg_state_aggr_active_offset(ipa->version);
+ val = ioread32(ipa->reg_virt + offset);
+
+ return !!(val & mask);
+}
+
+static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
+{
+ u32 mask = BIT(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+
+ /* assert(mask & ipa->available); */
+ iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
+}
+
+/**
+ * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
+ * @endpoint: Endpoint to be reset
+ *
+ * If aggregation is active on an RX endpoint when a reset is performed
+ * on its underlying GSI channel, a special sequence of actions must be
+ * taken to ensure the IPA pipeline is properly cleared.
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct ipa *ipa = endpoint->ipa;
+ bool endpoint_suspended = false;
+ struct gsi *gsi = &ipa->gsi;
+ dma_addr_t addr;
+ bool db_enable;
+ u32 retries;
+ u32 len = 1;
+ void *virt;
+ int ret;
+
+ virt = kzalloc(len, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ ret = -ENOMEM;
+ goto out_kfree;
+ }
+
+ /* Force close aggregation before issuing the reset */
+ ipa_endpoint_force_close(endpoint);
+
+ /* Reset and reconfigure the channel with the doorbell engine
+ * disabled. Then poll until we know aggregation is no longer
+ * active. We'll re-enable the doorbell (if appropriate) when
+ * we reset again below.
+ */
+ gsi_channel_reset(gsi, endpoint->channel_id, false);
+
+ /* Make sure the channel isn't suspended */
+ if (endpoint->ipa->version == IPA_VERSION_3_5_1)
+ if (!ipa_endpoint_init_ctrl(endpoint, false))
+ endpoint_suspended = true;
+
+ /* Start channel and do a 1 byte read */
+ ret = gsi_channel_start(gsi, endpoint->channel_id);
+ if (ret)
+ goto out_suspend_again;
+
+ ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
+ if (ret)
+ goto err_endpoint_stop;
+
+ /* Wait for aggregation to be closed on the channel */
+ retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
+ do {
+ if (!ipa_endpoint_aggr_active(endpoint))
+ break;
+ msleep(1);
+ } while (retries--);
+
+ /* Check one last time */
+ if (ipa_endpoint_aggr_active(endpoint))
+ dev_err(dev, "endpoint %u still active during reset\n",
+ endpoint->endpoint_id);
+
+ gsi_trans_read_byte_done(gsi, endpoint->channel_id);
+
+ ret = ipa_endpoint_stop(endpoint);
+ if (ret)
+ goto out_suspend_again;
+
+ /* Finally, reset and reconfigure the channel again (re-enabling the
+ * doorbell engine if appropriate). Sleep for 1 millisecond to
+ * complete the channel reset sequence. Finish by suspending the
+ * channel again (if necessary).
+ */
+ db_enable = ipa->version == IPA_VERSION_3_5_1;
+ gsi_channel_reset(gsi, endpoint->channel_id, db_enable);
+
+ msleep(1);
+
+ goto out_suspend_again;
+
+err_endpoint_stop:
+ ipa_endpoint_stop(endpoint);
+out_suspend_again:
+ if (endpoint_suspended)
+ (void)ipa_endpoint_init_ctrl(endpoint, true);
+ dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
+out_kfree:
+ kfree(virt);
+
+ return ret;
+}
+
+static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
+{
+ u32 channel_id = endpoint->channel_id;
+ struct ipa *ipa = endpoint->ipa;
+ bool db_enable;
+ bool special;
+ int ret = 0;
+
+ /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
+ * is active, we need to handle things specially to recover.
+ * All other cases just need to reset the underlying GSI channel.
+ *
+ * IPA v3.5.1 enables the doorbell engine. Newer versions do not.
+ */
+ db_enable = ipa->version == IPA_VERSION_3_5_1;
+ special = !endpoint->toward_ipa && endpoint->data->aggregation;
+ if (special && ipa_endpoint_aggr_active(endpoint))
+ ret = ipa_endpoint_reset_rx_aggr(endpoint);
+ else
+ gsi_channel_reset(&ipa->gsi, channel_id, db_enable);
+
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d resetting channel %u for endpoint %u\n",
+ ret, endpoint->channel_id, endpoint->endpoint_id);
+}
+
+static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
+{
+ u16 size = IPA_ENDPOINT_STOP_RX_SIZE;
+ struct gsi_trans *trans;
+ dma_addr_t addr;
+ int ret;
+
+ trans = ipa_cmd_trans_alloc(ipa, 1);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction for RX endpoint STOP workaround\n");
+ return -EBUSY;
+ }
+
+ /* Read into the highest part of the zero memory area */
+ addr = ipa->zero_addr + ipa->zero_size - size;
+
+ ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false);
+
+ ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT);
+ if (ret)
+ gsi_trans_free(trans);
+
+ return ret;
+}
+
+/**
+ * ipa_endpoint_stop() - Stops a GSI channel in IPA
+ * @endpoint: Endpoint whose underlying GSI channel should be stopped
+ *
+ * This function implements the sequence to stop a GSI channel
+ * in IPA. It returns when the channel is in the STOP state.
+ *
+ * Return: 0 on success, negative otherwise
+ */
+int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
+{
+ u32 retries = endpoint->toward_ipa ? 0 : IPA_ENDPOINT_STOP_RX_RETRIES;
+ int ret;
+
+ do {
+ struct ipa *ipa = endpoint->ipa;
+ struct gsi *gsi = &ipa->gsi;
+
+ ret = gsi_channel_stop(gsi, endpoint->channel_id);
+ if (ret != -EAGAIN)
+ return ret;
+
+ if (endpoint->toward_ipa)
+ continue;
+
+ /* For IPA v3.5.1, send a DMA read task and check again */
+ if (ipa->version == IPA_VERSION_3_5_1) {
+ ret = ipa_endpoint_stop_rx_dma(ipa);
+ if (ret)
+ return ret;
+ }
+
+ msleep(1);
+ } while (retries--);
+
+ /* Retries exhausted; the channel never reached the STOP state */
+ return -EIO;
+}
+
+static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ int ret;
+
+ if (endpoint->toward_ipa) {
+ bool delay_mode = endpoint->data->tx.delay;
+
+ ret = ipa_endpoint_init_ctrl(endpoint, delay_mode);
+ /* ipa_endpoint_init_ctrl() returns 0 only if the state changed,
+ * so this warns exactly when the endpoint had been in delay mode.
+ */
+ if (!ret != delay_mode) {
+ dev_warn(dev,
+ "TX endpoint %u was %sin delay mode\n",
+ endpoint->endpoint_id,
+ delay_mode ? "already " : "");
+ }
+ ipa_endpoint_init_hdr_ext(endpoint);
+ ipa_endpoint_init_aggr(endpoint);
+ ipa_endpoint_init_deaggr(endpoint);
+ ipa_endpoint_init_seq(endpoint);
+ } else {
+ if (endpoint->ipa->version == IPA_VERSION_3_5_1) {
+ if (!ipa_endpoint_init_ctrl(endpoint, false))
+ dev_warn(dev,
+ "RX endpoint %u was suspended\n",
+ endpoint->endpoint_id);
+ }
+ ipa_endpoint_init_hdr_ext(endpoint);
+ ipa_endpoint_init_aggr(endpoint);
+ }
+ ipa_endpoint_init_cfg(endpoint);
+ ipa_endpoint_init_hdr(endpoint);
+ ipa_endpoint_init_hdr_metadata_mask(endpoint);
+ ipa_endpoint_init_mode(endpoint);
+ ipa_endpoint_status(endpoint);
+}
+
+int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
+{
+ struct ipa *ipa = endpoint->ipa;
+ struct gsi *gsi = &ipa->gsi;
+ int ret;
+
+ ret = gsi_channel_start(gsi, endpoint->channel_id);
+ if (ret) {
+ dev_err(&ipa->pdev->dev,
+ "error %d starting %cX channel %u for endpoint %u\n",
+ ret, endpoint->toward_ipa ? 'T' : 'R',
+ endpoint->channel_id, endpoint->endpoint_id);
+ return ret;
+ }
+
+ if (!endpoint->toward_ipa) {
+ ipa_interrupt_suspend_enable(ipa->interrupt,
+ endpoint->endpoint_id);
+ ipa_endpoint_replenish_enable(endpoint);
+ }
+
+ ipa->enabled |= BIT(endpoint->endpoint_id);
+
+ return 0;
+}
+
+void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
+{
+ u32 mask = BIT(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ int ret;
+
+ if (!(endpoint->ipa->enabled & mask))
+ return;
+
+ endpoint->ipa->enabled ^= mask;
+
+ if (!endpoint->toward_ipa) {
+ ipa_endpoint_replenish_disable(endpoint);
+ ipa_interrupt_suspend_disable(ipa->interrupt,
+ endpoint->endpoint_id);
+ }
+
+ /* Note that if stop fails, the channel's state is not well-defined */
+ ret = ipa_endpoint_stop(endpoint);
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d attempting to stop endpoint %u\n", ret,
+ endpoint->endpoint_id);
+}
+
+/**
+ * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
+ * @endpoint: Endpoint on which to emulate a suspend interrupt
+ *
+ * Emulate an IPA SUSPEND interrupt to unsuspend an endpoint that was
+ * suspended with an open aggregation frame. This works around a hardware
+ * issue in IPA version 3.5.1 where the suspend interrupt will not be
+ * generated when it should be.
+ */
+static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
+{
+ struct ipa *ipa = endpoint->ipa;
+
+ /* assert(ipa->version == IPA_VERSION_3_5_1); */
+
+ if (!endpoint->data->aggregation)
+ return;
+
+ /* Nothing to do if the endpoint doesn't have aggregation open */
+ if (!ipa_endpoint_aggr_active(endpoint))
+ return;
+
+ /* Force close aggregation */
+ ipa_endpoint_force_close(endpoint);
+
+ ipa_interrupt_simulate_suspend(ipa->interrupt);
+}
+
+void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ bool stop_channel;
+ int ret;
+
+ if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
+ return;
+
+ if (!endpoint->toward_ipa)
+ ipa_endpoint_replenish_disable(endpoint);
+
+ /* IPA v3.5.1 doesn't use channel stop for suspend */
+ stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
+ if (!endpoint->toward_ipa && !stop_channel) {
+ /* Due to a hardware bug, a client suspended with an open
+ * aggregation frame will not generate a SUSPEND IPA
+ * interrupt. We work around this by force-closing the
+ * aggregation frame, then simulating the arrival of such
+ * an interrupt.
+ */
+ WARN_ON(ipa_endpoint_init_ctrl(endpoint, true));
+ ipa_endpoint_suspend_aggr(endpoint);
+ }
+
+ ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
+ if (ret)
+ dev_err(dev, "error %d suspending channel %u\n", ret,
+ endpoint->channel_id);
+}
+
+void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ bool start_channel;
+ int ret;
+
+ if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
+ return;
+
+ /* IPA v3.5.1 doesn't use channel start for resume */
+ start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
+ if (!endpoint->toward_ipa && !start_channel)
+ WARN_ON(ipa_endpoint_init_ctrl(endpoint, false));
+
+ ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
+ if (ret)
+ dev_err(dev, "error %d resuming channel %u\n", ret,
+ endpoint->channel_id);
+ else if (!endpoint->toward_ipa)
+ ipa_endpoint_replenish_enable(endpoint);
+}
+
+void ipa_endpoint_suspend(struct ipa *ipa)
+{
+ if (ipa->modem_netdev)
+ ipa_modem_suspend(ipa->modem_netdev);
+
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+}
+
+void ipa_endpoint_resume(struct ipa *ipa)
+{
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
+
+ if (ipa->modem_netdev)
+ ipa_modem_resume(ipa->modem_netdev);
+}
+
+static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
+{
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ u32 channel_id = endpoint->channel_id;
+
+ /* Only AP endpoints get set up */
+ if (endpoint->ee_id != GSI_EE_AP)
+ return;
+
+ endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
+ if (!endpoint->toward_ipa) {
+ /* RX transactions require a single TRE, so the maximum
+ * backlog is the same as the maximum outstanding TREs.
+ */
+ endpoint->replenish_enabled = false;
+ atomic_set(&endpoint->replenish_saved,
+ gsi_channel_tre_max(gsi, endpoint->channel_id));
+ atomic_set(&endpoint->replenish_backlog, 0);
+ INIT_DELAYED_WORK(&endpoint->replenish_work,
+ ipa_endpoint_replenish_work);
+ }
+
+ ipa_endpoint_program(endpoint);
+
+ endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
+}
+
+static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
+{
+ endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
+
+ if (!endpoint->toward_ipa)
+ cancel_delayed_work_sync(&endpoint->replenish_work);
+
+ ipa_endpoint_reset(endpoint);
+}
+
+void ipa_endpoint_setup(struct ipa *ipa)
+{
+ u32 initialized = ipa->initialized;
+
+ ipa->set_up = 0;
+ while (initialized) {
+ u32 endpoint_id = __ffs(initialized);
+
+ initialized ^= BIT(endpoint_id);
+
+ ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
+ }
+}
+
+void ipa_endpoint_teardown(struct ipa *ipa)
+{
+ u32 set_up = ipa->set_up;
+
+ while (set_up) {
+ u32 endpoint_id = __fls(set_up);
+
+ set_up ^= BIT(endpoint_id);
+
+ ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
+ }
+ ipa->set_up = 0;
+}
+
+int ipa_endpoint_config(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 initialized;
+ u32 rx_base;
+ u32 rx_mask;
+ u32 tx_mask;
+ int ret = 0;
+ u32 max;
+ u32 val;
+
+ /* Find out about the endpoints supplied by the hardware, and ensure
+ * the highest one doesn't exceed the number we support.
+ */
+ val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
+
+ /* Our RX is an IPA producer */
+ rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
+ max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
+ if (max > IPA_ENDPOINT_MAX) {
+ dev_err(dev, "too many endpoints (%u > %u)\n",
+ max, IPA_ENDPOINT_MAX);
+ return -EINVAL;
+ }
+ rx_mask = GENMASK(max - 1, rx_base);
+
+ /* Our TX is an IPA consumer */
+ max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
+ tx_mask = GENMASK(max - 1, 0);
+
+ ipa->available = rx_mask | tx_mask;
+
+ /* Check for initialized endpoints not supported by the hardware */
+ if (ipa->initialized & ~ipa->available) {
+ dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
+ ipa->initialized & ~ipa->available);
+ ret = -EINVAL; /* Report other errors too */
+ }
+
+ initialized = ipa->initialized;
+ while (initialized) {
+ u32 endpoint_id = __ffs(initialized);
+ struct ipa_endpoint *endpoint;
+
+ initialized ^= BIT(endpoint_id);
+
+ /* Make sure it's pointing in the right direction */
+ endpoint = &ipa->endpoint[endpoint_id];
+ if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
+ dev_err(dev, "endpoint id %u wrong direction\n",
+ endpoint_id);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+void ipa_endpoint_deconfig(struct ipa *ipa)
+{
+ ipa->available = 0; /* Nothing more to do */
+}
+
+static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ struct ipa_endpoint *endpoint;
+
+ endpoint = &ipa->endpoint[data->endpoint_id];
+
+ if (data->ee_id == GSI_EE_AP)
+ ipa->channel_map[data->channel_id] = endpoint;
+ ipa->name_map[name] = endpoint;
+
+ endpoint->ipa = ipa;
+ endpoint->ee_id = data->ee_id;
+ endpoint->seq_type = data->endpoint.seq_type;
+ endpoint->channel_id = data->channel_id;
+ endpoint->endpoint_id = data->endpoint_id;
+ endpoint->toward_ipa = data->toward_ipa;
+ endpoint->data = &data->endpoint.config;
+
+ ipa->initialized |= BIT(endpoint->endpoint_id);
+}
+
+void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
+{
+ endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
+
+ memset(endpoint, 0, sizeof(*endpoint));
+}
+
+void ipa_endpoint_exit(struct ipa *ipa)
+{
+ u32 initialized = ipa->initialized;
+
+ while (initialized) {
+ u32 endpoint_id = __fls(initialized);
+
+ initialized ^= BIT(endpoint_id);
+
+ ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
+ }
+ memset(ipa->name_map, 0, sizeof(ipa->name_map));
+ memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
+}
+
+/* Returns a bitmask of endpoints that support filtering, or 0 on error */
+u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ enum ipa_endpoint_name name;
+ u32 filter_map;
+
+ if (!ipa_endpoint_data_valid(ipa, count, data))
+ return 0; /* Error */
+
+ ipa->initialized = 0;
+
+ filter_map = 0;
+ for (name = 0; name < count; name++, data++) {
+ if (ipa_gsi_endpoint_data_empty(data))
+ continue; /* Skip over empty slots */
+
+ ipa_endpoint_init_one(ipa, name, data);
+
+ if (data->endpoint.filter_support)
+ filter_map |= BIT(data->endpoint_id);
+ }
+
+ if (!ipa_filter_map_valid(ipa, filter_map))
+ goto err_endpoint_exit;
+
+ return filter_map; /* Non-zero bitmask */
+
+err_endpoint_exit:
+ ipa_endpoint_exit(ipa);
+
+ return 0; /* Error */
+}
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
new file mode 100644
index 000000000000..4b336a1f759d
--- /dev/null
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_ENDPOINT_H_
+#define _IPA_ENDPOINT_H_
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/if_ether.h>
+
+#include "gsi.h"
+#include "ipa_reg.h"
+
+struct net_device;
+struct sk_buff;
+
+struct ipa;
+struct ipa_gsi_endpoint_data;
+struct ipa_endpoint_config_data;
+
+/* Non-zero granularity of counter used to implement aggregation timeout */
+#define IPA_AGGR_GRANULARITY 500 /* microseconds */
+
+#define IPA_MTU ETH_DATA_LEN
+
+enum ipa_endpoint_name {
+ IPA_ENDPOINT_AP_MODEM_TX = 0,
+ IPA_ENDPOINT_MODEM_LAN_TX,
+ IPA_ENDPOINT_MODEM_COMMAND_TX,
+ IPA_ENDPOINT_AP_COMMAND_TX,
+ IPA_ENDPOINT_MODEM_AP_TX,
+ IPA_ENDPOINT_AP_LAN_RX,
+ IPA_ENDPOINT_AP_MODEM_RX,
+ IPA_ENDPOINT_MODEM_AP_RX,
+ IPA_ENDPOINT_MODEM_LAN_RX,
+ IPA_ENDPOINT_COUNT, /* Number of names (not an index) */
+};
+
+#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
+
+/**
+ * struct ipa_endpoint - IPA endpoint information
+ * @ipa: IPA pointer
+ * @channel_id: Endpoint's GSI channel
+ * @endpoint_id: IPA endpoint number
+ * @toward_ipa: True for TX (AP->IPA) endpoints, false for RX
+ * @evt_ring_id: Endpoint's GSI channel event ring
+ */
+struct ipa_endpoint {
+ struct ipa *ipa;
+ enum ipa_seq_type seq_type;
+ enum gsi_ee_id ee_id;
+ u32 channel_id;
+ u32 endpoint_id;
+ bool toward_ipa;
+ const struct ipa_endpoint_config_data *data;
+
+ u32 trans_tre_max; /* maximum descriptors per transaction */
+ u32 evt_ring_id;
+
+ /* Net device this endpoint is associated with, if any */
+ struct net_device *netdev;
+
+ /* Receive buffer replenishing for RX endpoints */
+ bool replenish_enabled;
+ u32 replenish_ready;
+ atomic_t replenish_saved;
+ atomic_t replenish_backlog;
+ struct delayed_work replenish_work; /* global wq */
+};
+
+void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa);
+
+void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable);
+
+int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);
+
+int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb);
+
+int ipa_endpoint_stop(struct ipa_endpoint *endpoint);
+
+void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint);
+
+int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint);
+void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint);
+
+void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint);
+void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint);
+
+void ipa_endpoint_suspend(struct ipa *ipa);
+void ipa_endpoint_resume(struct ipa *ipa);
+
+void ipa_endpoint_setup(struct ipa *ipa);
+void ipa_endpoint_teardown(struct ipa *ipa);
+
+int ipa_endpoint_config(struct ipa *ipa);
+void ipa_endpoint_deconfig(struct ipa *ipa);
+
+void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id);
+void ipa_endpoint_default_route_clear(struct ipa *ipa);
+
+u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data);
+void ipa_endpoint_exit(struct ipa *ipa);
+
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans);
+void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans);
+
+#endif /* _IPA_ENDPOINT_H_ */
diff --git a/drivers/net/ipa/ipa_gsi.c b/drivers/net/ipa/ipa_gsi.c
new file mode 100644
index 000000000000..dc4a5c2196ae
--- /dev/null
+++ b/drivers/net/ipa/ipa_gsi.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_data.h"
+
+void ipa_gsi_trans_complete(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+
+ ipa_endpoint_trans_complete(ipa->channel_map[trans->channel_id], trans);
+}
+
+void ipa_gsi_trans_release(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+
+ ipa_endpoint_trans_release(ipa->channel_map[trans->channel_id], trans);
+}
+
+void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count)
+{
+ struct ipa *ipa = container_of(gsi, struct ipa, gsi);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->channel_map[channel_id];
+ if (endpoint->netdev)
+ netdev_sent_queue(endpoint->netdev, byte_count);
+}
+
+void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count)
+{
+ struct ipa *ipa = container_of(gsi, struct ipa, gsi);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->channel_map[channel_id];
+ if (endpoint->netdev)
+ netdev_completed_queue(endpoint->netdev, count, byte_count);
+}
+
+/* Indicate whether an endpoint config data entry is "empty" */
+bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data)
+{
+ return data->ee_id == GSI_EE_AP && !data->channel.tlv_count;
+}
diff --git a/drivers/net/ipa/ipa_gsi.h b/drivers/net/ipa/ipa_gsi.h
new file mode 100644
index 000000000000..3cf18600c68e
--- /dev/null
+++ b/drivers/net/ipa/ipa_gsi.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_GSI_H_
+#define _IPA_GSI_H_
+
+#include <linux/types.h>
+
+struct gsi;
+struct gsi_trans;
+struct ipa_gsi_endpoint_data;
+
+/**
+ * ipa_gsi_trans_complete() - GSI transaction completion callback
+ * @trans: Transaction that has completed
+ *
+ * This is called from the GSI layer to notify the IPA layer that a
+ * transaction has completed.
+ */
+void ipa_gsi_trans_complete(struct gsi_trans *trans);
+
+/**
+ * ipa_gsi_trans_release() - GSI transaction release callback
+ * @trans: Transaction whose resources should be freed
+ *
+ * This is called from the GSI layer to notify the IPA layer that a
+ * transaction is about to be freed, so any resources associated
+ * with it should be released.
+ */
+void ipa_gsi_trans_release(struct gsi_trans *trans);
+
+/**
+ * ipa_gsi_channel_tx_queued() - GSI queued to hardware notification
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ * @count: Number of transactions queued
+ * @byte_count: Number of bytes to transfer represented by transactions
+ *
+ * This is called from the GSI layer to notify the IPA layer that some
+ * number of transactions have been queued to hardware for execution.
+ */
+void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count);
+/**
+ * ipa_gsi_trans_complete() - GSI transaction completion callback
+ipa_gsi_channel_tx_completed()
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ * @count: Number of transactions completed since last report
+ * @byte_count: Number of bytes transferred represented by transactions
+ *
+ * This is called from the GSI layer to notify the IPA layer that the hardware
+ * has reported the completion of some number of transactions.
+ */
+void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count);
+
+bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data);
+
+#endif /* _IPA_GSI_H_ */
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
new file mode 100644
index 000000000000..90353987c45f
--- /dev/null
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+/* DOC: IPA Interrupts
+ *
+ * The IPA has an interrupt line distinct from the interrupt used by the GSI
+ * code. Whereas GSI interrupts are generally related to channel events (like
+ * transfer completions), IPA interrupts are related to other events
+ * involving the IPA. Some of the IPA interrupts come from a microcontroller
+ * embedded in the IPA. Each IPA interrupt type can be both masked and
+ * acknowledged independent of the others.
+ *
+ * Two of the IPA interrupts are initiated by the microcontroller. A third
+ * can be generated to signal the need for a wakeup/resume when an IPA
+ * endpoint has been suspended. There are other IPA events, but at this
+ * time only these three are supported.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_reg.h"
+#include "ipa_endpoint.h"
+#include "ipa_interrupt.h"
+
+/**
+ * struct ipa_interrupt - IPA interrupt information
+ * @ipa: IPA pointer
+ * @irq: Linux IRQ number used for IPA interrupts
+ * @enabled: Mask indicating which interrupts are enabled
+ * @handler: Array of handlers indexed by IPA interrupt ID
+ */
+struct ipa_interrupt {
+ struct ipa *ipa;
+ u32 irq;
+ u32 enabled;
+ ipa_irq_handler_t handler[IPA_IRQ_COUNT];
+};
+
+/* Returns true if the interrupt type is associated with the microcontroller */
+static bool ipa_interrupt_uc(struct ipa_interrupt *interrupt, u32 irq_id)
+{
+ return irq_id == IPA_IRQ_UC_0 || irq_id == IPA_IRQ_UC_1;
+}
+
+/* Process a particular interrupt type that has been received */
+static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
+{
+ bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
+ struct ipa *ipa = interrupt->ipa;
+ u32 mask = BIT(irq_id);
+
+ /* For microcontroller interrupts, clear the interrupt right away,
+ * "to avoid clearing unhandled interrupts."
+ */
+ if (uc_irq)
+ iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+
+ if (irq_id < IPA_IRQ_COUNT && interrupt->handler[irq_id])
+ interrupt->handler[irq_id](interrupt->ipa, irq_id);
+
+ /* Clearing the SUSPEND_TX interrupt also clears the register
+ * that tells us which suspended endpoint(s) caused the interrupt,
+ * so defer clearing until after the handler has been called.
+ */
+ if (!uc_irq)
+ iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+}
+
+/* Process all IPA interrupt types that have been signaled */
+static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 enabled = interrupt->enabled;
+ u32 mask;
+
+ /* The status register indicates which conditions are present,
+ * including conditions whose interrupt is not enabled. Handle
+ * only the enabled ones.
+ */
+ mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ while ((mask &= enabled)) {
+ do {
+ u32 irq_id = __ffs(mask);
+
+ mask ^= BIT(irq_id);
+
+ ipa_interrupt_process(interrupt, irq_id);
+ } while (mask);
+ mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ }
+}
+
+/* Threaded part of the IPA IRQ handler */
+static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
+{
+ struct ipa_interrupt *interrupt = dev_id;
+
+ ipa_clock_get(interrupt->ipa);
+
+ ipa_interrupt_process_all(interrupt);
+
+ ipa_clock_put(interrupt->ipa);
+
+ return IRQ_HANDLED;
+}
+
+/* Hard part (i.e., "real" IRQ handler) of the IRQ handler */
+static irqreturn_t ipa_isr(int irq, void *dev_id)
+{
+ struct ipa_interrupt *interrupt = dev_id;
+ struct ipa *ipa = interrupt->ipa;
+ u32 mask;
+
+ mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ if (mask & interrupt->enabled)
+ return IRQ_WAKE_THREAD;
+
+ /* Nothing in the mask was supposed to cause an interrupt */
+ iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+
+ dev_err(&ipa->pdev->dev, "%s: unexpected interrupt, mask 0x%08x\n",
+ __func__, mask);
+
+ return IRQ_HANDLED;
+}
+
+/* Common function used to enable/disable TX_SUSPEND for an endpoint */
+static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
+ u32 endpoint_id, bool enable)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 mask = BIT(endpoint_id);
+ u32 val;
+
+ /* assert(mask & ipa->available); */
+ val = ioread32(ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+ if (enable)
+ val |= mask;
+ else
+ val &= ~mask;
+ iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+}
+
+/* Enable TX_SUSPEND for an endpoint */
+void
+ipa_interrupt_suspend_enable(struct ipa_interrupt *interrupt, u32 endpoint_id)
+{
+ ipa_interrupt_suspend_control(interrupt, endpoint_id, true);
+}
+
+/* Disable TX_SUSPEND for an endpoint */
+void
+ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
+{
+ ipa_interrupt_suspend_control(interrupt, endpoint_id, false);
+}
+
+/* Clear the suspend interrupt for all endpoints that signaled it */
+void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 val;
+
+ val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_INFO_OFFSET);
+ iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_CLR_OFFSET);
+}
+
+/* Simulate arrival of an IPA TX_SUSPEND interrupt */
+void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
+{
+ ipa_interrupt_process(interrupt, IPA_IRQ_TX_SUSPEND);
+}
+
+/* Add a handler for an IPA interrupt */
+void ipa_interrupt_add(struct ipa_interrupt *interrupt,
+ enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
+{
+ struct ipa *ipa = interrupt->ipa;
+
+ /* assert(ipa_irq < IPA_IRQ_COUNT); */
+ interrupt->handler[ipa_irq] = handler;
+
+ /* Update the IPA interrupt mask to enable it */
+ interrupt->enabled |= BIT(ipa_irq);
+ iowrite32(interrupt->enabled, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+}
+
+/* Remove the handler for an IPA interrupt type */
+void
+ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
+{
+ struct ipa *ipa = interrupt->ipa;
+
+ /* assert(ipa_irq < IPA_IRQ_COUNT); */
+ /* Update the IPA interrupt mask to disable it */
+ interrupt->enabled &= ~BIT(ipa_irq);
+ iowrite32(interrupt->enabled, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+
+ interrupt->handler[ipa_irq] = NULL;
+}
+
+/* Set up the IPA interrupt framework */
+struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct ipa_interrupt *interrupt;
+ unsigned int irq;
+ int ret;
+
+ ret = platform_get_irq_byname(ipa->pdev, "ipa");
+ if (ret <= 0) {
+ dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n",
+ ret);
+ return ERR_PTR(ret ? : -EINVAL);
+ }
+ irq = ret;
+
+ interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
+ if (!interrupt)
+ return ERR_PTR(-ENOMEM);
+ interrupt->ipa = ipa;
+ interrupt->irq = irq;
+
+ /* Start with all IPA interrupts disabled */
+ iowrite32(0, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+
+ ret = request_threaded_irq(irq, ipa_isr, ipa_isr_thread, IRQF_ONESHOT,
+ "ipa", interrupt);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
+ goto err_kfree;
+ }
+
+ return interrupt;
+
+err_kfree:
+ kfree(interrupt);
+
+ return ERR_PTR(ret);
+}
+
+/* Tear down the IPA interrupt framework */
+void ipa_interrupt_teardown(struct ipa_interrupt *interrupt)
+{
+ free_irq(interrupt->irq, interrupt);
+ kfree(interrupt);
+}
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
new file mode 100644
index 000000000000..d4f4c1c9f0b1
--- /dev/null
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_INTERRUPT_H_
+#define _IPA_INTERRUPT_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+struct ipa;
+struct ipa_interrupt;
+
+/**
+ * enum ipa_irq_id - IPA interrupt type
+ * @IPA_IRQ_UC_0: Microcontroller event interrupt
+ * @IPA_IRQ_UC_1: Microcontroller response interrupt
+ * @IPA_IRQ_TX_SUSPEND: Data ready interrupt
+ *
+ * The data ready interrupt is signaled if data has arrived that is destined
+ * for an AP RX endpoint whose underlying GSI channel is suspended/stopped.
+ */
+enum ipa_irq_id {
+ IPA_IRQ_UC_0 = 2,
+ IPA_IRQ_UC_1 = 3,
+ IPA_IRQ_TX_SUSPEND = 14,
+ IPA_IRQ_COUNT, /* Last; one more than the largest defined ID (not an ID) */
+};
+
+/**
+ * typedef ipa_irq_handler_t - IPA interrupt handler function type
+ * @ipa: IPA pointer
+ * @irq_id: interrupt type
+ *
+ * Callback function registered by ipa_interrupt_add() to handle a specific
+ * IPA interrupt type
+ */
+typedef void (*ipa_irq_handler_t)(struct ipa *ipa, enum ipa_irq_id irq_id);
+
+/**
+ * ipa_interrupt_add() - Register a handler for an IPA interrupt type
+ * @irq_id: IPA interrupt type
+ * @handler: Handler function for the interrupt
+ *
+ * Add a handler for an IPA interrupt and enable it. IPA interrupt
+ * handlers are run in threaded interrupt context, so are allowed to
+ * block.
+ */
+void ipa_interrupt_add(struct ipa_interrupt *interrupt, enum ipa_irq_id irq_id,
+ ipa_irq_handler_t handler);
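+
+/* Editor's sketch (not part of the patch): a hypothetical caller could
+ * register a handler for the microcontroller event interrupt like this.
+ * Handlers run in threaded interrupt context, so they may block:
+ *
+ *	static void uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+ *	{
+ *		dev_info(&ipa->pdev->dev, "uC event (irq %u)\n", irq_id);
+ *	}
+ *
+ *	ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, uc_event_handler);
+ *
+ * The matching teardown call would be:
+ *
+ *	ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ */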
+
+/**
+ * ipa_interrupt_remove() - Remove the handler for an IPA interrupt type
+ * @interrupt: IPA interrupt structure
+ * @irq_id: IPA interrupt type
+ *
+ * Remove an IPA interrupt handler and disable it.
+ */
+void ipa_interrupt_remove(struct ipa_interrupt *interrupt,
+ enum ipa_irq_id irq_id);
+
+/**
+ * ipa_interrupt_suspend_enable - Enable TX_SUSPEND for an endpoint
+ * @interrupt: IPA interrupt structure
+ * @endpoint_id: Endpoint whose interrupt should be enabled
+ *
+ * Note: The "TX" in the name is from the perspective of the IPA hardware.
+ * A TX_SUSPEND interrupt arrives on an AP RX endpoint when packet data can't
+ * be delivered to the endpoint because it is suspended (or its underlying
+ * channel is stopped).
+ */
+void ipa_interrupt_suspend_enable(struct ipa_interrupt *interrupt,
+ u32 endpoint_id);
+
+/**
+ * ipa_interrupt_suspend_disable - Disable TX_SUSPEND for an endpoint
+ * @interrupt: IPA interrupt structure
+ * @endpoint_id: Endpoint whose interrupt should be disabled
+ */
+void ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt,
+ u32 endpoint_id);
+
+/**
+ * ipa_interrupt_suspend_clear_all - clear all suspend interrupts
+ * @interrupt: IPA interrupt structure
+ *
+ * Clear the TX_SUSPEND interrupt for all endpoints that signaled it.
+ */
+void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
+
+/**
+ * ipa_interrupt_simulate_suspend() - Simulate TX_SUSPEND IPA interrupt
+ * @interrupt: IPA interrupt structure
+ *
+ * This calls the TX_SUSPEND interrupt handler, as if such an interrupt
+ * had been signaled. This is needed to work around a hardware quirk
+ * that occurs if aggregation is active on an endpoint when its underlying
+ * channel is suspended.
+ */
+void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
+
+/**
+ * ipa_interrupt_setup() - Set up the IPA interrupt framework
+ * @ipa: IPA pointer
+ *
+ * @Return: Pointer to IPA interrupt structure, or a pointer-coded error
+ */
+struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_teardown() - Tear down the IPA interrupt framework
+ * @interrupt: IPA interrupt structure
+ */
+void ipa_interrupt_teardown(struct ipa_interrupt *interrupt);
+
+#endif /* _IPA_INTERRUPT_H_ */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
new file mode 100644
index 000000000000..d6e7f257e99d
--- /dev/null
+++ b/drivers/net/ipa/ipa_main.c
@@ -0,0 +1,954 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/remoteproc.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_cmd.h"
+#include "ipa_reg.h"
+#include "ipa_mem.h"
+#include "ipa_table.h"
+#include "ipa_modem.h"
+#include "ipa_uc.h"
+#include "ipa_interrupt.h"
+#include "gsi_trans.h"
+
+/**
+ * DOC: The IP Accelerator
+ *
+ * This driver supports the Qualcomm IP Accelerator (IPA), which is a
+ * networking component found in many Qualcomm SoCs. The IPA is connected
+ * to the application processor (AP), but is also connected (and partially
+ * controlled by) other "execution environments" (EEs), such as a modem.
+ *
+ * The IPA is the conduit between the AP and the modem that carries network
+ * traffic. This driver presents a network interface representing the
+ * connection of the modem to external (e.g. LTE) networks.
+ *
+ * The IPA provides protocol checksum calculation, offloading this work
+ * from the AP. The IPA offers additional functionality, including routing,
+ * filtering, and NAT support, but that more advanced functionality is not
+ * currently supported. Despite that, some resources--including routing
+ * tables and filter tables--are defined in this driver because they must
+ * be initialized even when the advanced hardware features are not used.
+ *
+ * There are two distinct layers that implement the IPA hardware, and this
+ * is reflected in the organization of the driver. The generic software
+ * interface (GSI) is an integral component of the IPA, providing a
+ * well-defined communication layer between the AP subsystem and the IPA
+ * core. The GSI implements a set of "channels" used for communication
+ * between the AP and the IPA.
+ *
+ * The IPA layer uses GSI channels to implement its "endpoints". And while
+ * a GSI channel carries data between the AP and the IPA, a pair of IPA
+ * endpoints is used to carry traffic between two EEs. Specifically, the main
+ * modem network interface is implemented by two pairs of endpoints: a TX
+ * endpoint on the AP coupled with an RX endpoint on the modem; and another
+ * RX endpoint on the AP receiving data from a TX endpoint on the modem.
+ */
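+
+/* Editor's sketch of the main modem network interface described above;
+ * each endpoint pair is backed by its own GSI channel:
+ *
+ *	AP TX endpoint  --- GSI channel --->  modem RX endpoint
+ *	AP RX endpoint  <--- GSI channel ---  modem TX endpoint
+ */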
+
+/* The name of the GSI firmware file relative to /lib/firmware */
+#define IPA_FWS_PATH "ipa_fws.mdt"
+#define IPA_PAS_ID 15
+
+/**
+ * ipa_suspend_handler() - Handle the suspend IPA interrupt
+ * @ipa: IPA pointer
+ * @irq_id: IPA interrupt type (unused)
+ *
+ * When in suspended state, the IPA can trigger a resume by sending a SUSPEND
+ * IPA interrupt.
+ */
+static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ /* Take a single clock reference to prevent suspend. All
+ * endpoints will be resumed as a result. This reference will
+ * be dropped when we get a power management suspend request.
+ */
+ if (!atomic_xchg(&ipa->suspend_ref, 1))
+ ipa_clock_get(ipa);
+
+ /* Acknowledge/clear the suspend interrupt on all endpoints */
+ ipa_interrupt_suspend_clear_all(ipa->interrupt);
+}
+
+/**
+ * ipa_setup() - Set up IPA hardware
+ * @ipa: IPA pointer
+ *
+ * Perform initialization that requires issuing immediate commands on
+ * the command TX endpoint. If the modem is doing GSI firmware load
+ * and initialization, this function will be called when an SMP2P
+ * interrupt has been signaled by the modem. Otherwise it will be
+ * called from ipa_probe() after GSI firmware has been successfully
+ * loaded, authenticated, and started by Trust Zone.
+ */
+int ipa_setup(struct ipa *ipa)
+{
+ struct ipa_endpoint *exception_endpoint;
+ struct ipa_endpoint *command_endpoint;
+ int ret;
+
+ /* IPA v4.0 and above don't use the doorbell engine. */
+ ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
+ if (ret)
+ return ret;
+
+ ipa->interrupt = ipa_interrupt_setup(ipa);
+ if (IS_ERR(ipa->interrupt)) {
+ ret = PTR_ERR(ipa->interrupt);
+ goto err_gsi_teardown;
+ }
+ ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
+ ipa_suspend_handler);
+
+ ipa_uc_setup(ipa);
+
+ ipa_endpoint_setup(ipa);
+
+ /* We need to use the AP command TX endpoint to perform other
+ * initialization, so we enable it first.
+ */
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ ret = ipa_endpoint_enable_one(command_endpoint);
+ if (ret)
+ goto err_endpoint_teardown;
+
+ ret = ipa_mem_setup(ipa);
+ if (ret)
+ goto err_command_disable;
+
+ ret = ipa_table_setup(ipa);
+ if (ret)
+ goto err_mem_teardown;
+
+ /* Enable the exception handling endpoint, and tell the hardware
+ * to use it by default.
+ */
+ exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ ret = ipa_endpoint_enable_one(exception_endpoint);
+ if (ret)
+ goto err_table_teardown;
+
+ ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);
+
+ /* We're all set. Now prepare for communication with the modem */
+ ret = ipa_modem_setup(ipa);
+ if (ret)
+ goto err_default_route_clear;
+
+ ipa->setup_complete = true;
+
+ dev_info(&ipa->pdev->dev, "IPA driver setup completed successfully\n");
+
+ return 0;
+
+err_default_route_clear:
+ ipa_endpoint_default_route_clear(ipa);
+ ipa_endpoint_disable_one(exception_endpoint);
+err_table_teardown:
+ ipa_table_teardown(ipa);
+err_mem_teardown:
+ ipa_mem_teardown(ipa);
+err_command_disable:
+ ipa_endpoint_disable_one(command_endpoint);
+err_endpoint_teardown:
+ ipa_endpoint_teardown(ipa);
+ ipa_uc_teardown(ipa);
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+ ipa_interrupt_teardown(ipa->interrupt);
+err_gsi_teardown:
+ gsi_teardown(&ipa->gsi);
+
+ return ret;
+}
+
+/**
+ * ipa_teardown() - Inverse of ipa_setup()
+ * @ipa: IPA pointer
+ */
+static void ipa_teardown(struct ipa *ipa)
+{
+ struct ipa_endpoint *exception_endpoint;
+ struct ipa_endpoint *command_endpoint;
+
+ ipa_modem_teardown(ipa);
+ ipa_endpoint_default_route_clear(ipa);
+ exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ ipa_endpoint_disable_one(exception_endpoint);
+ ipa_table_teardown(ipa);
+ ipa_mem_teardown(ipa);
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ ipa_endpoint_disable_one(command_endpoint);
+ ipa_endpoint_teardown(ipa);
+ ipa_uc_teardown(ipa);
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+ ipa_interrupt_teardown(ipa->interrupt);
+ gsi_teardown(&ipa->gsi);
+}
+
+/* Configure QMB Core Master Port selection */
+static void ipa_hardware_config_comp(struct ipa *ipa)
+{
+ u32 val;
+
+ /* Nothing to configure for IPA v3.5.1 */
+ if (ipa->version == IPA_VERSION_3_5_1)
+ return;
+
+ val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+
+ if (ipa->version == IPA_VERSION_4_0) {
+ val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
+ val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
+ val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
+ } else {
+ val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
+ }
+
+ val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
+ val |= GSI_MULTI_INORDER_WR_DIS_FMASK;
+
+ iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+}
+
+/* Configure DDR and PCIe max read/write QSB values */
+static void ipa_hardware_config_qsb(struct ipa *ipa)
+{
+ u32 val;
+
+ /* QMB_0 represents DDR; QMB_1 represents PCIe (not present in 4.2) */
+ val = u32_encode_bits(8, GEN_QMB_0_MAX_WRITES_FMASK);
+ if (ipa->version == IPA_VERSION_4_2)
+ val |= u32_encode_bits(0, GEN_QMB_1_MAX_WRITES_FMASK);
+ else
+ val |= u32_encode_bits(4, GEN_QMB_1_MAX_WRITES_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);
+
+ if (ipa->version == IPA_VERSION_3_5_1) {
+ val = u32_encode_bits(8, GEN_QMB_0_MAX_READS_FMASK);
+ val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
+ } else {
+ val = u32_encode_bits(12, GEN_QMB_0_MAX_READS_FMASK);
+ if (ipa->version == IPA_VERSION_4_2)
+ val |= u32_encode_bits(0, GEN_QMB_1_MAX_READS_FMASK);
+ else
+ val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
+ /* GEN_QMB_0_MAX_READS_BEATS is 0 */
+ /* GEN_QMB_1_MAX_READS_BEATS is 0 */
+ }
+ iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
+}
+
+static void ipa_idle_indication_cfg(struct ipa *ipa,
+ u32 enter_idle_debounce_thresh,
+ bool const_non_idle_enable)
+{
+ u32 offset;
+ u32 val;
+
+ val = u32_encode_bits(enter_idle_debounce_thresh,
+ ENTER_IDLE_DEBOUNCE_THRESH_FMASK);
+ if (const_non_idle_enable)
+ val |= CONST_NON_IDLE_ENABLE_FMASK;
+
+ offset = ipa_reg_idle_indication_cfg_offset(ipa->version);
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/**
+ * ipa_hardware_dcd_config() - Enable dynamic clock division on IPA
+ * @ipa: IPA pointer
+ *
+ * Configures when the IPA signals it is idle to the global clock
+ * controller, which can respond by scaling down the clock to
+ * save power.
+ */
+static void ipa_hardware_dcd_config(struct ipa *ipa)
+{
+ /* Recommended values for IPA 3.5 according to IPA HPG */
+ ipa_idle_indication_cfg(ipa, 256, false);
+}
+
+static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
+{
+ /* Power-on reset values */
+ ipa_idle_indication_cfg(ipa, 0, true);
+}
+
+/**
+ * ipa_hardware_config() - Primitive hardware initialization
+ * @ipa: IPA pointer
+ */
+static void ipa_hardware_config(struct ipa *ipa)
+{
+ u32 granularity;
+ u32 val;
+
+ /* Fill in backward-compatibility register, based on version */
+ val = ipa_reg_bcr_val(ipa->version);
+ iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
+
+ if (ipa->version != IPA_VERSION_3_5_1) {
+ /* Enable open global clocks (hardware workaround) */
+ val = GLOBAL_FMASK;
+ val |= GLOBAL_2X_CLK_FMASK;
+ iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
+
+ /* Disable PA mask to allow HOLB drop (hardware workaround) */
+ val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
+ val &= ~PA_MASK_EN;
+ iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
+ }
+
+ ipa_hardware_config_comp(ipa);
+
+ /* Configure system bus limits */
+ ipa_hardware_config_qsb(ipa);
+
+ /* Configure aggregation granularity */
+ granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
+ val = u32_encode_bits(granularity, AGGR_GRANULARITY);
+ iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
+
+ /* Disable hashed IPv4 and IPv6 routing and filtering for IPA v4.2 */
+ if (ipa->version == IPA_VERSION_4_2)
+ iowrite32(0, ipa->reg_virt + IPA_REG_FILT_ROUT_HASH_EN_OFFSET);
+
+ /* Enable dynamic clock division */
+ ipa_hardware_dcd_config(ipa);
+}
+
+/**
+ * ipa_hardware_deconfig() - Inverse of ipa_hardware_config()
+ * @ipa: IPA pointer
+ *
+ * This restores the power-on reset values (even if they aren't different)
+ */
+static void ipa_hardware_deconfig(struct ipa *ipa)
+{
+ /* Mostly we just leave things as we set them. */
+ ipa_hardware_dcd_deconfig(ipa);
+}
+
+#ifdef IPA_VALIDATE
+
+/* Number of IPA resource groups used, based on version (see IPA_RESOURCE_GROUP_COUNT) */
+static int ipa_resource_group_count(struct ipa *ipa)
+{
+ switch (ipa->version) {
+ case IPA_VERSION_3_5_1:
+ return 3;
+
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ return 4;
+
+ case IPA_VERSION_4_2:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static bool ipa_resource_limits_valid(struct ipa *ipa,
+ const struct ipa_resource_data *data)
+{
+ u32 group_count = ipa_resource_group_count(ipa);
+ u32 i;
+ u32 j;
+
+ if (!group_count)
+ return false;
+
+ /* Return an error if a non-zero resource group limit is specified
+ * for a resource not supported by hardware.
+ */
+ for (i = 0; i < data->resource_src_count; i++) {
+ const struct ipa_resource_src *resource;
+
+ resource = &data->resource_src[i];
+ for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ if (resource->limits[j].min || resource->limits[j].max)
+ return false;
+ }
+
+ for (i = 0; i < data->resource_dst_count; i++) {
+ const struct ipa_resource_dst *resource;
+
+ resource = &data->resource_dst[i];
+ for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ if (resource->limits[j].min || resource->limits[j].max)
+ return false;
+ }
+
+ return true;
+}
+
+#else /* !IPA_VALIDATE */
+
+static bool ipa_resource_limits_valid(struct ipa *ipa,
+ const struct ipa_resource_data *data)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+static void
+ipa_resource_config_common(struct ipa *ipa, u32 offset,
+ const struct ipa_resource_limits *xlimits,
+ const struct ipa_resource_limits *ylimits)
+{
+ u32 val;
+
+ val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
+ val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
+ val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
+ val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static void ipa_resource_config_src_01(struct ipa *ipa,
+ const struct ipa_resource_src *resource)
+{
+ u32 offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[0], &resource->limits[1]);
+}
+
+static void ipa_resource_config_src_23(struct ipa *ipa,
+ const struct ipa_resource_src *resource)
+{
+ u32 offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[2], &resource->limits[3]);
+}
+
+static void ipa_resource_config_dst_01(struct ipa *ipa,
+ const struct ipa_resource_dst *resource)
+{
+ u32 offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[0], &resource->limits[1]);
+}
+
+static void ipa_resource_config_dst_23(struct ipa *ipa,
+ const struct ipa_resource_dst *resource)
+{
+ u32 offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[2], &resource->limits[3]);
+}
+
+static int
+ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
+{
+ u32 i;
+
+ if (!ipa_resource_limits_valid(ipa, data))
+ return -EINVAL;
+
+ for (i = 0; i < data->resource_src_count; i++) {
+ ipa_resource_config_src_01(ipa, &data->resource_src[i]);
+ ipa_resource_config_src_23(ipa, &data->resource_src[i]);
+ }
+
+ for (i = 0; i < data->resource_dst_count; i++) {
+ ipa_resource_config_dst_01(ipa, &data->resource_dst[i]);
+ ipa_resource_config_dst_23(ipa, &data->resource_dst[i]);
+ }
+
+ return 0;
+}
+
+static void ipa_resource_deconfig(struct ipa *ipa)
+{
+ /* Nothing to do */
+}
+
+/**
+ * ipa_config() - Configure IPA hardware
+ * @ipa: IPA pointer
+ *
+ * Perform initialization requiring IPA clock to be enabled.
+ */
+static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
+{
+ int ret;
+
+ /* Get a clock reference to allow initialization. This reference
+ * is held after initialization completes, and won't get dropped
+ * unless/until a system suspend request arrives.
+ */
+ atomic_set(&ipa->suspend_ref, 1);
+ ipa_clock_get(ipa);
+
+ ipa_hardware_config(ipa);
+
+ ret = ipa_endpoint_config(ipa);
+ if (ret)
+ goto err_hardware_deconfig;
+
+ ret = ipa_mem_config(ipa);
+ if (ret)
+ goto err_endpoint_deconfig;
+
+ ipa_table_config(ipa);
+
+ /* Assign resource limitation to each group */
+ ret = ipa_resource_config(ipa, data->resource_data);
+ if (ret)
+ goto err_table_deconfig;
+
+ ret = ipa_modem_config(ipa);
+ if (ret)
+ goto err_resource_deconfig;
+
+ return 0;
+
+err_resource_deconfig:
+ ipa_resource_deconfig(ipa);
+err_table_deconfig:
+ ipa_table_deconfig(ipa);
+ ipa_mem_deconfig(ipa);
+err_endpoint_deconfig:
+ ipa_endpoint_deconfig(ipa);
+err_hardware_deconfig:
+ ipa_hardware_deconfig(ipa);
+ ipa_clock_put(ipa);
+ atomic_set(&ipa->suspend_ref, 0);
+
+ return ret;
+}
+
+/**
+ * ipa_deconfig() - Inverse of ipa_config()
+ * @ipa: IPA pointer
+ */
+static void ipa_deconfig(struct ipa *ipa)
+{
+ ipa_modem_deconfig(ipa);
+ ipa_resource_deconfig(ipa);
+ ipa_table_deconfig(ipa);
+ ipa_mem_deconfig(ipa);
+ ipa_endpoint_deconfig(ipa);
+ ipa_hardware_deconfig(ipa);
+ ipa_clock_put(ipa);
+ atomic_set(&ipa->suspend_ref, 0);
+}
+
+static int ipa_firmware_load(struct device *dev)
+{
+ const struct firmware *fw;
+ struct device_node *node;
+ struct resource res;
+ phys_addr_t phys;
+ ssize_t size;
+ void *virt;
+ int ret;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(dev, "DT error getting \"memory-region\" property\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "error %d getting \"memory-region\" resource\n",
+ ret);
+ return ret;
+ }
+
+ ret = request_firmware(&fw, IPA_FWS_PATH, dev);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"%s\"\n", ret, IPA_FWS_PATH);
+ return ret;
+ }
+
+ phys = res.start;
+ size = (size_t)resource_size(&res);
+ virt = memremap(phys, size, MEMREMAP_WC);
+ if (!virt) {
+ dev_err(dev, "unable to remap firmware memory\n");
+ ret = -ENOMEM;
+ goto out_release_firmware;
+ }
+
+ ret = qcom_mdt_load(dev, fw, IPA_FWS_PATH, IPA_PAS_ID,
+ virt, phys, size, NULL);
+ if (ret)
+ dev_err(dev, "error %d loading \"%s\"\n", ret, IPA_FWS_PATH);
+ else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID)))
+ dev_err(dev, "error %d authenticating \"%s\"\n", ret,
+ IPA_FWS_PATH);
+
+ memunmap(virt);
+out_release_firmware:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static const struct of_device_id ipa_match[] = {
+ {
+ .compatible = "qcom,sdm845-ipa",
+ .data = &ipa_data_sdm845,
+ },
+ {
+ .compatible = "qcom,sc7180-ipa",
+ .data = &ipa_data_sc7180,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ipa_match);
+
+static phandle of_property_read_phandle(const struct device_node *np,
+ const char *name)
+{
+ struct property *prop;
+ int len = 0;
+
+ prop = of_find_property(np, name, &len);
+ if (!prop || len != sizeof(__be32))
+ return 0;
+
+ return be32_to_cpup(prop->value);
+}
+
+/* Check things that can be validated at build time. This just
+ * groups these things so BUILD_BUG_ON() calls don't clutter the
+ * rest of the code.
+ */
+static void ipa_validate_build(void)
+{
+#ifdef IPA_VALIDATE
+ /* We assume we're working on 64-bit hardware */
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT));
+
+ /* Code assumes the EE ID for the AP is 0 (zeroed structure field) */
+ BUILD_BUG_ON(GSI_EE_AP != 0);
+
+ /* There's no point if we have no channels or event rings */
+ BUILD_BUG_ON(!GSI_CHANNEL_COUNT_MAX);
+ BUILD_BUG_ON(!GSI_EVT_RING_COUNT_MAX);
+
+ /* GSI hardware design limits */
+ BUILD_BUG_ON(GSI_CHANNEL_COUNT_MAX > 32);
+ BUILD_BUG_ON(GSI_EVT_RING_COUNT_MAX > 31);
+
+ /* The number of TREs in a transaction is limited by the channel's
+ * TLV FIFO size. A transaction structure uses 8-bit fields
+ * to represent the number of TREs it has allocated and used.
+ */
+ BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);
+
+ /* Exceeding 128 bytes makes the transaction pool *much* larger */
+ BUILD_BUG_ON(sizeof(struct gsi_trans) > 128);
+
+ /* This is used as a divisor */
+ BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
+#endif /* IPA_VALIDATE */
+}
+
+/**
+ * ipa_probe() - IPA platform driver probe function
+ * @pdev: Platform device pointer
+ *
+ * @Return: 0 if successful, or a negative error code (possibly
+ * EPROBE_DEFER)
+ *
+ * This is the main entry point for the IPA driver. Initialization proceeds
+ * in several stages:
+ * - The "init" stage involves activities that can be initialized without
+ * access to the IPA hardware.
+ * - The "config" stage requires the IPA clock to be active so IPA registers
+ * can be accessed, but does not require the use of IPA immediate commands.
+ * - The "setup" stage uses IPA immediate commands, and so requires the GSI
+ * layer to be initialized.
+ *
+ * A Boolean Device Tree "modem-init" property determines whether GSI
+ * initialization will be performed by the AP (Trust Zone) or the modem.
+ * If the AP does GSI initialization, the setup phase is entered after
+ * this has completed successfully. Otherwise the modem initializes
+ * the GSI layer and signals it has finished by sending an SMP2P interrupt
+ * to the AP; this triggers the start of IPA setup.
+ */
+static int ipa_probe(struct platform_device *pdev)
+{
+ struct wakeup_source *wakeup_source;
+ struct device *dev = &pdev->dev;
+ const struct ipa_data *data;
+ struct ipa_clock *clock;
+ struct rproc *rproc;
+ bool modem_alloc;
+ bool modem_init;
+ struct ipa *ipa;
+ phandle phandle;
+ bool prefetch;
+ int ret;
+
+ ipa_validate_build();
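+
+ /* Editor's note (assumed mapping, not part of the patch): the stages
+ * described in the comment above correspond roughly to calls below:
+ *	init:   ipa_reg_init(), ipa_mem_init(), gsi_init(),
+ *		ipa_endpoint_init(), ipa_table_init(), ipa_modem_init()
+ *	config: ipa_config()	(requires the IPA clock)
+ *	setup:  ipa_setup()	(requires GSI; with "modem-init" it is
+ *		triggered later by the modem's SMP2P interrupt)
+ */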
+
+ /* If we need Trust Zone, make sure it's available */
+ modem_init = of_property_read_bool(dev->of_node, "modem-init");
+ if (!modem_init)
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ /* We rely on remoteproc to tell us about modem state changes */
+ phandle = of_property_read_phandle(dev->of_node, "modem-remoteproc");
+ if (!phandle) {
+ dev_err(dev, "DT missing \"modem-remoteproc\" property\n");
+ return -EINVAL;
+ }
+
+ rproc = rproc_get_by_phandle(phandle);
+ if (!rproc)
+ return -EPROBE_DEFER;
+
+ /* The clock and interconnects might not be ready when we're
+ * probed, so this might return -EPROBE_DEFER.
+ */
+ clock = ipa_clock_init(dev);
+ if (IS_ERR(clock)) {
+ ret = PTR_ERR(clock);
+ goto err_rproc_put;
+ }
+
+ /* No more EPROBE_DEFER. Get our configuration data */
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ /* This is really IPA_VALIDATE (should never happen) */
+ dev_err(dev, "matched hardware not supported\n");
+ ret = -ENOTSUPP;
+ goto err_clock_exit;
+ }
+
+ /* Create a wakeup source. */
+ wakeup_source = wakeup_source_register(dev, "ipa");
+ if (!wakeup_source) {
+ /* The most likely reason for failure is memory exhaustion */
+ ret = -ENOMEM;
+ goto err_clock_exit;
+ }
+
+ /* Allocate and initialize the IPA structure */
+ ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
+ if (!ipa) {
+ ret = -ENOMEM;
+ goto err_wakeup_source_unregister;
+ }
+
+ ipa->pdev = pdev;
+ dev_set_drvdata(dev, ipa);
+ ipa->modem_rproc = rproc;
+ ipa->clock = clock;
+ atomic_set(&ipa->suspend_ref, 0);
+ ipa->wakeup_source = wakeup_source;
+ ipa->version = data->version;
+
+ ret = ipa_reg_init(ipa);
+ if (ret)
+ goto err_kfree_ipa;
+
+ ret = ipa_mem_init(ipa, data->mem_count, data->mem_data);
+ if (ret)
+ goto err_reg_exit;
+
+ /* GSI v2.0+ (IPA v4.0+) uses prefetch for the command channel */
+ prefetch = ipa->version != IPA_VERSION_3_5_1;
+ /* IPA v4.2 requires the AP to allocate channels for the modem */
+ modem_alloc = ipa->version == IPA_VERSION_4_2;
+
+ ret = gsi_init(&ipa->gsi, pdev, prefetch, data->endpoint_count,
+ data->endpoint_data, modem_alloc);
+ if (ret)
+ goto err_mem_exit;
+
+ /* Result is a non-zero mask of endpoints that support filtering */
+ ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count,
+ data->endpoint_data);
+ if (!ipa->filter_map) {
+ ret = -EINVAL;
+ goto err_gsi_exit;
+ }
+
+ ret = ipa_table_init(ipa);
+ if (ret)
+ goto err_endpoint_exit;
+
+ ret = ipa_modem_init(ipa, modem_init);
+ if (ret)
+ goto err_table_exit;
+
+ ret = ipa_config(ipa, data);
+ if (ret)
+ goto err_modem_exit;
+
+ dev_info(dev, "IPA driver initialized");
+
+ /* If the modem is doing early initialization, it will trigger a
+ * call to ipa_setup() when it has finished. In that case
+ * we're done here.
+ */
+ if (modem_init)
+ return 0;
+
+ /* Otherwise we need to load the firmware and have Trust Zone validate
+ * and install it. If that succeeds we can proceed with setup.
+ */
+ ret = ipa_firmware_load(dev);
+ if (ret)
+ goto err_deconfig;
+
+ ret = ipa_setup(ipa);
+ if (ret)
+ goto err_deconfig;
+
+ return 0;
+
+err_deconfig:
+ ipa_deconfig(ipa);
+err_modem_exit:
+ ipa_modem_exit(ipa);
+err_table_exit:
+ ipa_table_exit(ipa);
+err_endpoint_exit:
+ ipa_endpoint_exit(ipa);
+err_gsi_exit:
+ gsi_exit(&ipa->gsi);
+err_mem_exit:
+ ipa_mem_exit(ipa);
+err_reg_exit:
+ ipa_reg_exit(ipa);
+err_kfree_ipa:
+ kfree(ipa);
+err_wakeup_source_unregister:
+ wakeup_source_unregister(wakeup_source);
+err_clock_exit:
+ ipa_clock_exit(clock);
+err_rproc_put:
+ rproc_put(rproc);
+
+ return ret;
+}
+
+static int ipa_remove(struct platform_device *pdev)
+{
+ struct ipa *ipa = dev_get_drvdata(&pdev->dev);
+ struct rproc *rproc = ipa->modem_rproc;
+ struct ipa_clock *clock = ipa->clock;
+ struct wakeup_source *wakeup_source;
+ int ret;
+
+ wakeup_source = ipa->wakeup_source;
+
+ if (ipa->setup_complete) {
+ ret = ipa_modem_stop(ipa);
+ if (ret)
+ return ret;
+
+ ipa_teardown(ipa);
+ }
+
+ ipa_deconfig(ipa);
+ ipa_modem_exit(ipa);
+ ipa_table_exit(ipa);
+ ipa_endpoint_exit(ipa);
+ gsi_exit(&ipa->gsi);
+ ipa_mem_exit(ipa);
+ ipa_reg_exit(ipa);
+ kfree(ipa);
+ wakeup_source_unregister(wakeup_source);
+ ipa_clock_exit(clock);
+ rproc_put(rproc);
+
+ return 0;
+}
+
+/**
+ * ipa_suspend() - Power management system suspend callback
+ * @dev: IPA device structure
+ *
+ * @Return: Zero
+ *
+ * Called by the PM framework when a system suspend operation is invoked.
+ */
+static int ipa_suspend(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ ipa_clock_put(ipa);
+ atomic_set(&ipa->suspend_ref, 0);
+
+ return 0;
+}
+
+/**
+ * ipa_resume() - Power management system resume callback
+ * @dev: IPA device structure
+ *
+ * @Return: Always returns 0
+ *
+ * Called by the PM framework when a system resume operation is invoked.
+ */
+static int ipa_resume(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ /* This clock reference will keep the IPA out of suspend
+ * until we get a power management suspend request.
+ */
+ atomic_set(&ipa->suspend_ref, 1);
+ ipa_clock_get(ipa);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ipa_pm_ops = {
+ .suspend_noirq = ipa_suspend,
+ .resume_noirq = ipa_resume,
+};
+
+static struct platform_driver ipa_driver = {
+ .probe = ipa_probe,
+ .remove = ipa_remove,
+ .driver = {
+ .name = "ipa",
+ .pm = &ipa_pm_ops,
+ .of_match_table = ipa_match,
+ },
+};
+
+module_platform_driver(ipa_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm IP Accelerator device driver");
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
new file mode 100644
index 000000000000..42d2c29d9f0c
--- /dev/null
+++ b/drivers/net/ipa/ipa_mem.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include "ipa.h"
+#include "ipa_reg.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+#include "ipa_data.h"
+#include "ipa_table.h"
+#include "gsi_trans.h"
+
+/* "Canary" value placed between memory regions to detect overflow */
+#define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef)
+
+/* Add an immediate command to a transaction that zeroes a memory region */
+static void
+ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ dma_addr_t addr = ipa->zero_addr;
+
+ if (!mem->size)
+ return;
+
+ ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
+}
+
+/**
+ * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
+ * @ipa: IPA pointer
+ *
+ * Set up the shared memory regions in IPA local memory. This involves
+ * zero-filling memory regions, and in the case of header memory, telling
+ * the IPA where it's located.
+ *
+ * This function performs the initial setup of this memory. If the modem
+ * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
+ *
+ * The AP informs the modem where its portions of memory are located
+ * in a QMI exchange that occurs at modem startup.
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int ipa_mem_setup(struct ipa *ipa)
+{
+ dma_addr_t addr = ipa->zero_addr;
+ struct gsi_trans *trans;
+ u32 offset;
+ u16 size;
+
+ /* Get a transaction to define the header memory region and to zero
+ * the processing context and modem memory regions.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 4);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
+ return -EBUSY;
+ }
+
+ /* Initialize IPA-local header memory. The modem and AP header
+ * regions are contiguous, and initialized together.
+ */
+ offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
+ size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
+ size += ipa->mem[IPA_MEM_AP_HEADER].size;
+
+ ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_AP_PROC_CTX]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+
+ gsi_trans_commit_wait(trans);
+
+ /* Tell the hardware where the processing context area is located */
+ iowrite32(ipa->mem_offset + offset,
+ ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET);
+
+ return 0;
+}
+
+void ipa_mem_teardown(struct ipa *ipa)
+{
+ /* Nothing to do */
+}
+
+#ifdef IPA_VALIDATE
+
+static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ const struct ipa_mem *mem = &ipa->mem[mem_id];
+ struct device *dev = &ipa->pdev->dev;
+ u16 size_multiple;
+
+ /* Other than modem memory, sizes must be a multiple of 8 */
+ size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
+ if (mem->size % size_multiple)
+ dev_err(dev, "region %u size not a multiple of %u bytes\n",
+ mem_id, size_multiple);
+ else if (mem->offset % 8)
+ dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
+ else if (mem->offset < mem->canary_count * sizeof(__le32))
+ dev_err(dev, "region %u offset too small for %hu canaries\n",
+ mem_id, mem->canary_count);
+ else if (mem->offset + mem->size > ipa->mem_size)
+ dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
+ mem_id, ipa->mem_size);
+ else
+ return true;
+
+ return false;
+}
+
+#else /* !IPA_VALIDATE */
+
+static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+/**
+ * ipa_mem_config() - Configure IPA shared memory
+ * @ipa: IPA pointer
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int ipa_mem_config(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_mem_id mem_id;
+ dma_addr_t addr;
+ u32 mem_size;
+ void *virt;
+ u32 val;
+
+ /* Check the advertised location and size of the shared memory area */
+ val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
+
+ /* The fields in the register are in 8 byte units */
+ ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
+ /* Make sure the end is within the region's mapped space */
+ mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);
+
+ /* If the sizes don't match, issue a warning */
+ if (ipa->mem_offset + mem_size > ipa->mem_size) {
+ dev_warn(dev, "ignoring larger reported memory size: 0x%08x\n",
+ mem_size);
+ } else if (ipa->mem_offset + mem_size < ipa->mem_size) {
+ dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
+ mem_size);
+ ipa->mem_size = mem_size;
+ }
+
+ /* Prealloc DMA memory for zeroing regions */
+ virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+ ipa->zero_addr = addr;
+ ipa->zero_virt = virt;
+ ipa->zero_size = IPA_MEM_MAX;
+
+ /* Verify each defined memory region is valid, and if indicated
+ * for the region, write "canary" values in the space prior to
+ * the region's base address.
+ */
+ for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
+ const struct ipa_mem *mem = &ipa->mem[mem_id];
+ u16 canary_count;
+ __le32 *canary;
+
+ /* Validate all regions (even undefined ones) */
+ if (!ipa_mem_valid(ipa, mem_id))
+ goto err_dma_free;
+
+ /* Skip over undefined regions */
+ if (!mem->offset && !mem->size)
+ continue;
+
+ canary_count = mem->canary_count;
+ if (!canary_count)
+ continue;
+
+ /* Write canary values in the space before the region */
+ canary = ipa->mem_virt + ipa->mem_offset + mem->offset;
+ do
+ *--canary = IPA_MEM_CANARY_VAL;
+ while (--canary_count);
+ }
+
+ /* Make sure filter and route table memory regions are valid */
+ if (!ipa_table_valid(ipa))
+ goto err_dma_free;
+
+ /* Validate memory-related properties relevant to immediate commands */
+ if (!ipa_cmd_data_valid(ipa))
+ goto err_dma_free;
+
+ /* Verify the microcontroller ring alignment (0 is OK too) */
+ if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
+ dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
+ goto err_dma_free;
+ }
+
+ return 0;
+
+err_dma_free:
+ dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);
+
+ return -EINVAL;
+}
+
+/* Inverse of ipa_mem_config() */
+void ipa_mem_deconfig(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+
+ dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
+ ipa->zero_size = 0;
+ ipa->zero_virt = NULL;
+ ipa->zero_addr = 0;
+}
+
+/**
+ * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
+ * @ipa: IPA pointer
+ *
+ * Zero regions of IPA-local memory used by the modem. These are configured
+ * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
+ * restarts via SSR we need to re-initialize them. A QMI message tells the
+ * modem where to find regions of IPA local memory it needs to know about
+ * (these included).
+ */
+int ipa_mem_zero_modem(struct ipa *ipa)
+{
+ struct gsi_trans *trans;
+
+ /* Get a transaction to zero the modem memory, modem header,
+ * and modem processing context regions.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 3);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction to zero modem memory\n");
+ return -EBUSY;
+ }
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_HEADER]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+/* Perform memory region-related initialization */
+int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct resource *res;
+ int ret;
+
+ if (count > IPA_MEM_COUNT) {
+ dev_err(dev, "to many memory regions (%u > %u)\n",
+ count, IPA_MEM_COUNT);
+ return -EINVAL;
+ }
+
+ ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(dev, "error %d setting DMA mask\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
+ "ipa-shared");
+ if (!res) {
+ dev_err(dev,
+ "DT error getting \"ipa-shared\" memory property\n");
+ return -ENODEV;
+ }
+
+ ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
+ if (!ipa->mem_virt) {
+ dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
+ return -ENOMEM;
+ }
+
+ ipa->mem_addr = res->start;
+ ipa->mem_size = resource_size(res);
+
+ /* The ipa->mem[] array is indexed by enum ipa_mem_id values */
+ ipa->mem = mem;
+
+ return 0;
+}
+
+/* Inverse of ipa_mem_init() */
+void ipa_mem_exit(struct ipa *ipa)
+{
+ memunmap(ipa->mem_virt);
+}
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
new file mode 100644
index 000000000000..065cb499ebe5
--- /dev/null
+++ b/drivers/net/ipa/ipa_mem.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_MEM_H_
+#define _IPA_MEM_H_
+
+struct ipa;
+
+/**
+ * DOC: IPA Local Memory
+ *
+ * The IPA has a block of shared memory, divided into regions used for
+ * specific purposes.
+ *
+ * The regions within the shared block are bounded by an offset (relative to
+ * the "ipa-shared" memory range) and size found in the IPA_SHARED_MEM_SIZE
+ * register.
+ *
+ * Each region is optionally preceded by one or more 32-bit "canary" values.
+ * These are meant to detect out-of-range writes (if they become corrupted).
+ * A given region (such as a filter or routing table) has the same number
+ * of canaries for all IPA hardware versions. Still, the number used is
+ * defined in the config data, allowing for generic handling of regions.
+ *
+ * The set of memory regions is defined in configuration data. They are
+ * subject to these constraints:
+ * - a zero offset and zero size represents an undefined region
+ * - a region's offset is defined to be *past* all "canary" values
+ * - offset must be large enough to account for all canaries
+ * - a region's size may be zero, but may still have canaries
+ * - all offsets must be 8-byte aligned
+ * - most sizes must be a multiple of 8
+ * - modem memory size must be a multiple of 4
+ * - the microcontroller ring offset must be a multiple of 1024
+ */
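+
+/* Editor's sketch (hypothetical numbers): a region at offset 0x2a0 with
+ * a canary_count of 2 is laid out as follows; ipa_mem_config() writes
+ * the canaries working backward from the region's base:
+ *
+ *	0x298:	0xdeadbeef	(written second)
+ *	0x29c:	0xdeadbeef	(written first)
+ *	0x2a0:	region base (mem->offset)
+ */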
+
+/* The maximum allowed size for any memory region */
+#define IPA_MEM_MAX (2 * PAGE_SIZE)
+
+/* IPA-resident memory region ids */
+enum ipa_mem_id {
+ IPA_MEM_UC_SHARED, /* 0 canaries */
+ IPA_MEM_UC_INFO, /* 0 canaries */
+ IPA_MEM_V4_FILTER_HASHED, /* 2 canaries */
+ IPA_MEM_V4_FILTER, /* 2 canaries */
+ IPA_MEM_V6_FILTER_HASHED, /* 2 canaries */
+ IPA_MEM_V6_FILTER, /* 2 canaries */
+ IPA_MEM_V4_ROUTE_HASHED, /* 2 canaries */
+ IPA_MEM_V4_ROUTE, /* 2 canaries */
+ IPA_MEM_V6_ROUTE_HASHED, /* 2 canaries */
+ IPA_MEM_V6_ROUTE, /* 2 canaries */
+ IPA_MEM_MODEM_HEADER, /* 2 canaries */
+ IPA_MEM_AP_HEADER, /* 0 canaries */
+ IPA_MEM_MODEM_PROC_CTX, /* 2 canaries */
+ IPA_MEM_AP_PROC_CTX, /* 0 canaries */
+ IPA_MEM_PDN_CONFIG, /* 2 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_QUOTA, /* 2 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_DROP, /* 0 canaries (IPA v4.0 and above) */
+ IPA_MEM_MODEM, /* 0 canaries */
+ IPA_MEM_UC_EVENT_RING, /* 1 canary */
+ IPA_MEM_COUNT, /* Number of regions (not an index) */
+};
+
+/**
+ * struct ipa_mem - IPA local memory region description
+ * @offset: offset in IPA memory space to base of the region
+ * @size: size in bytes of the region
+ * @canary_count: number of 32-bit "canary" values that precede the region
+ */
+struct ipa_mem {
+ u32 offset;
+ u16 size;
+ u16 canary_count;
+};
+
+int ipa_mem_config(struct ipa *ipa);
+void ipa_mem_deconfig(struct ipa *ipa);
+
+int ipa_mem_setup(struct ipa *ipa);
+void ipa_mem_teardown(struct ipa *ipa);
+
+int ipa_mem_zero_modem(struct ipa *ipa);
+
+int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem);
+void ipa_mem_exit(struct ipa *ipa);
+
+#endif /* _IPA_MEM_H_ */
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
new file mode 100644
index 000000000000..55c9329a4b1d
--- /dev/null
+++ b/drivers/net/ipa/ipa_modem.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_rmnet.h>
+#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
+
+#include "ipa.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_mem.h"
+#include "ipa_modem.h"
+#include "ipa_smp2p.h"
+#include "ipa_qmi.h"
+
+#define IPA_NETDEV_NAME "rmnet_ipa%d"
+#define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */
+#define IPA_NETDEV_TIMEOUT 10 /* seconds */
+
+enum ipa_modem_state {
+ IPA_MODEM_STATE_STOPPED = 0,
+ IPA_MODEM_STATE_STARTING,
+ IPA_MODEM_STATE_RUNNING,
+ IPA_MODEM_STATE_STOPPING,
+};
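+
+/* Editor's sketch of the (assumed) state transitions, driven by the
+ * atomic_cmpxchg() calls in ipa_modem_start() and ipa_modem_stop():
+ *
+ *	STOPPED --start--> STARTING --success--> RUNNING
+ *	RUNNING --stop---> STOPPING --success--> STOPPED
+ *
+ * On failure, the transient state reverts to the state it came from.
+ */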
+
+/** struct ipa_priv - IPA network device private data */
+struct ipa_priv {
+ struct ipa *ipa;
+};
+
+/** ipa_open() - Opens the modem network interface */
+static int ipa_open(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+ int ret;
+
+ ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ if (ret)
+ return ret;
+ ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ if (ret)
+ goto err_disable_tx;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_disable_tx:
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+
+ return ret;
+}
+
+/** ipa_stop() - Stops the modem network interface. */
+static int ipa_stop(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+
+ netif_stop_queue(netdev);
+
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+
+ return 0;
+}
+
+/** ipa_start_xmit() - Transmits an skb.
+ * @skb: skb to be transmitted
+ * @netdev: network device
+ *
+ * Return codes:
+ * NETDEV_TX_OK: Success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
+ */
+static int ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct net_device_stats *stats = &netdev->stats;
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa_endpoint *endpoint;
+ struct ipa *ipa = priv->ipa;
+ u32 skb_len = skb->len;
+ int ret;
+
+ if (!skb_len)
+ goto err_drop_skb;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
+ if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP))
+ goto err_drop_skb;
+
+ ret = ipa_endpoint_skb_tx(endpoint, skb);
+ if (ret) {
+ if (ret != -E2BIG)
+ return NETDEV_TX_BUSY;
+ goto err_drop_skb;
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += skb_len;
+
+ return NETDEV_TX_OK;
+
+err_drop_skb:
+ dev_kfree_skb_any(skb);
+ stats->tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct net_device_stats *stats = &netdev->stats;
+
+ if (skb) {
+ skb->dev = netdev;
+ skb->protocol = htons(ETH_P_MAP);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+
+ (void)netif_receive_skb(skb);
+ } else {
+ stats->rx_dropped++;
+ }
+}
+
+static const struct net_device_ops ipa_modem_ops = {
+ .ndo_open = ipa_open,
+ .ndo_stop = ipa_stop,
+ .ndo_start_xmit = ipa_start_xmit,
+};
+
+/** ipa_modem_netdev_setup() - netdev setup function for the modem */
+static void ipa_modem_netdev_setup(struct net_device *netdev)
+{
+ netdev->netdev_ops = &ipa_modem_ops;
+ ether_setup(netdev);
+ /* No header ops (override value set by ether_setup()) */
+ netdev->header_ops = NULL;
+ netdev->type = ARPHRD_RAWIP;
+ netdev->hard_header_len = 0;
+ netdev->max_mtu = IPA_MTU;
+ netdev->mtu = netdev->max_mtu;
+ netdev->addr_len = 0;
+ netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ /* The endpoint is configured for QMAP */
+ netdev->needed_headroom = sizeof(struct rmnet_map_header);
+ netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
+ netdev->watchdog_timeo = IPA_NETDEV_TIMEOUT * HZ;
+ netdev->hw_features = NETIF_F_SG;
+}
+
+/** ipa_modem_suspend() - suspend callback
+ * @netdev: Network device
+ *
+ * Suspend the modem's endpoints.
+ */
+void ipa_modem_suspend(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+
+ netif_stop_queue(netdev);
+
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+}
+
+/** ipa_modem_resume() - resume callback
+ * @netdev: Network device
+ *
+ * Resume the modem's endpoints.
+ */
+void ipa_modem_resume(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+
+ netif_wake_queue(netdev);
+}
+
+int ipa_modem_start(struct ipa *ipa)
+{
+ enum ipa_modem_state state;
+ struct net_device *netdev;
+ struct ipa_priv *priv;
+ int ret;
+
+ /* Only attempt to start the modem if it's stopped */
+ state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_STOPPED,
+ IPA_MODEM_STATE_STARTING);
+
+ /* Silently ignore attempts when running, or when changing state */
+ if (state != IPA_MODEM_STATE_STOPPED)
+ return 0;
+
+ netdev = alloc_netdev(sizeof(struct ipa_priv), IPA_NETDEV_NAME,
+ NET_NAME_UNKNOWN, ipa_modem_netdev_setup);
+ if (!netdev) {
+ ret = -ENOMEM;
+ goto out_set_state;
+ }
+
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+
+ priv = netdev_priv(netdev);
+ priv->ipa = ipa;
+
+ ret = register_netdev(netdev);
+ if (ret)
+ free_netdev(netdev);
+ else
+ ipa->modem_netdev = netdev;
+
+out_set_state:
+ if (ret)
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
+ else
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
+ smp_mb__after_atomic();
+
+ return ret;
+}
+
+int ipa_modem_stop(struct ipa *ipa)
+{
+ struct net_device *netdev = ipa->modem_netdev;
+ enum ipa_modem_state state;
+ int ret;
+
+ /* Only attempt to stop the modem if it's running */
+ state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING,
+ IPA_MODEM_STATE_STOPPING);
+
+ /* Silently ignore attempts when already stopped */
+ if (state == IPA_MODEM_STATE_STOPPED)
+ return 0;
+
+ /* If we're somewhere between stopped and starting, we're busy */
+ if (state != IPA_MODEM_STATE_RUNNING)
+ return -EBUSY;
+
+ /* Prevent the modem from triggering a call to ipa_setup() */
+ ipa_smp2p_disable(ipa);
+
+ if (netdev) {
+ /* Stop the queue and disable the endpoints if it's open */
+ ret = ipa_stop(netdev);
+ if (ret)
+ goto out_set_state;
+
+ ipa->modem_netdev = NULL;
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ } else {
+ ret = 0;
+ }
+
+out_set_state:
+ if (ret)
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
+ else
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
+ smp_mb__after_atomic();
+
+ return ret;
+}
+
+/* Treat a "clean" modem stop the same as a crash */
+static void ipa_modem_crashed(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ int ret;
+
+ ipa_endpoint_modem_pause_all(ipa, true);
+
+ ipa_endpoint_modem_hol_block_clear_all(ipa);
+
+ ipa_table_reset(ipa, true);
+
+ ret = ipa_table_hash_flush(ipa);
+ if (ret)
+ dev_err(dev, "error %d flushing hash caches\n", ret);
+
+ ret = ipa_endpoint_modem_exception_reset_all(ipa);
+ if (ret)
+ dev_err(dev, "error %d resetting exception endpoint",
+ ret);
+
+ ipa_endpoint_modem_pause_all(ipa, false);
+
+ ret = ipa_modem_stop(ipa);
+ if (ret)
+ dev_err(dev, "error %d stopping modem", ret);
+
+ /* Now prepare for the next modem boot */
+ ret = ipa_mem_zero_modem(ipa);
+ if (ret)
+ dev_err(dev, "error %d zeroing modem memory regions\n", ret);
+}
+
+static void ipa_modem_notify(void *data, enum qcom_rproc_event event)
+{
+ struct ipa *ipa = data;
+ struct device *dev;
+
+ dev = &ipa->pdev->dev;
+ switch (event) {
+ case MODEM_STARTING:
+ dev_info(dev, "received modem starting event\n");
+ ipa_smp2p_notify_reset(ipa);
+ break;
+
+ case MODEM_RUNNING:
+ dev_info(dev, "received modem running event\n");
+ break;
+
+ case MODEM_STOPPING:
+ case MODEM_CRASHED:
+ dev_info(dev, "received modem %s event\n",
+ event == MODEM_STOPPING ? "stopping"
+ : "crashed");
+ if (ipa->setup_complete)
+ ipa_modem_crashed(ipa);
+ break;
+
+ case MODEM_OFFLINE:
+ dev_info(dev, "received modem offline event\n");
+ break;
+
+ case MODEM_REMOVING:
+ dev_info(dev, "received modem stopping event\n");
+ break;
+
+ default:
+ dev_err(&ipa->pdev->dev, "unrecognized event %u\n", event);
+ break;
+ }
+}
+
+int ipa_modem_init(struct ipa *ipa, bool modem_init)
+{
+ return ipa_smp2p_init(ipa, modem_init);
+}
+
+void ipa_modem_exit(struct ipa *ipa)
+{
+ ipa_smp2p_exit(ipa);
+}
+
+int ipa_modem_config(struct ipa *ipa)
+{
+ return qcom_register_ipa_notify(ipa->modem_rproc, ipa_modem_notify,
+ ipa);
+}
+
+void ipa_modem_deconfig(struct ipa *ipa)
+{
+ qcom_deregister_ipa_notify(ipa->modem_rproc);
+}
+
+int ipa_modem_setup(struct ipa *ipa)
+{
+ return ipa_qmi_setup(ipa);
+}
+
+void ipa_modem_teardown(struct ipa *ipa)
+{
+ ipa_qmi_teardown(ipa);
+}
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
new file mode 100644
index 000000000000..2de3e216d1d4
--- /dev/null
+++ b/drivers/net/ipa/ipa_modem.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_MODEM_H_
+#define _IPA_MODEM_H_
+
+struct ipa;
+struct ipa_endpoint;
+struct net_device;
+struct sk_buff;
+
+int ipa_modem_start(struct ipa *ipa);
+int ipa_modem_stop(struct ipa *ipa);
+
+void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb);
+
+void ipa_modem_suspend(struct net_device *netdev);
+void ipa_modem_resume(struct net_device *netdev);
+
+int ipa_modem_init(struct ipa *ipa, bool modem_init);
+void ipa_modem_exit(struct ipa *ipa);
+
+int ipa_modem_config(struct ipa *ipa);
+void ipa_modem_deconfig(struct ipa *ipa);
+
+int ipa_modem_setup(struct ipa *ipa);
+void ipa_modem_teardown(struct ipa *ipa);
+
+#endif /* _IPA_MODEM_H_ */
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
new file mode 100644
index 000000000000..5090f0f923ad
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/qrtr.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+#include "ipa_table.h"
+#include "ipa_modem.h"
+#include "ipa_qmi_msg.h"
+
+/**
+ * DOC: AP/Modem QMI Handshake
+ *
+ * The AP and modem perform a "handshake" at initialization time to ensure
+ * both sides know when everything is ready to begin operating. The AP
+ * driver (this code) uses two QMI handles (endpoints) for this: a client
+ * using a service on the modem, and a server to service modem requests (and
+ * to supply an indication message from the AP). Once the handshake is
+ * complete, the AP and modem may begin IPA operation. This occurs
+ * only when the AP IPA driver, modem IPA driver, and IPA microcontroller
+ * are ready.
+ *
+ * The QMI service on the modem expects to receive an INIT_DRIVER request from
+ * the AP, which contains parameters used by the modem during initialization.
+ * The AP sends this request as soon as it knows the modem side service
+ * is available. The modem responds to this request, and if this response
+ * contains a success result, the AP knows the modem IPA driver is ready.
+ *
+ * The modem is responsible for loading firmware on the IPA microcontroller.
+ * This occurs only during the initial modem boot. The modem sends a
+ * separate DRIVER_INIT_COMPLETE request to the AP to report that the
+ * microcontroller is ready. The AP may assume the microcontroller is
+ * ready and remain so (even if the modem reboots) once it has received
+ * and responded to this request.
+ *
+ * There is one final exchange involved in the handshake. It is required
+ * on the initial modem boot, but optional (though in practice it does occur) on
+ * subsequent boots. The modem expects to receive a final INIT_COMPLETE
+ * indication message from the AP when it is about to begin its normal
+ * operation. The AP will only send this message after it has received
+ * and responded to an INDICATION_REGISTER request from the modem.
+ *
+ * So in summary:
+ * - Whenever the AP learns the modem has booted and its IPA QMI service
+ * is available, it sends an INIT_DRIVER request to the modem. The
+ * modem supplies a success response when it is ready to operate.
+ * - On the initial boot, the modem sets up the IPA microcontroller, and
+ * sends a DRIVER_INIT_COMPLETE request to the AP when this is done.
+ * - When the modem is ready to receive an INIT_COMPLETE indication from
+ * the AP, it sends an INDICATION_REGISTER request to the AP.
+ * - On the initial modem boot, everything is ready when:
+ * - AP has received a success response from its INIT_DRIVER request
+ * - AP has responded to a DRIVER_INIT_COMPLETE request
+ * - AP has responded to an INDICATION_REGISTER request from the modem
+ * - AP has sent an INIT_COMPLETE indication to the modem
+ * - On subsequent modem boots, everything is ready when:
+ * - AP has received a success response from its INIT_DRIVER request
+ * - AP has responded to a DRIVER_INIT_COMPLETE request
+ * - The INDICATION_REGISTER request and INIT_COMPLETE indication are
+ * optional for non-initial modem boots, and have no bearing on the
+ * determination of when things are "ready"
+ */
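+
+/* Editor's sketch: the readiness rule summarized above reduces to a
+ * predicate roughly like the following, which ipa_qmi_ready() below
+ * implements step by step:
+ *
+ *	bool ready = ipa_qmi->modem_ready && ipa_qmi->uc_ready &&
+ *		     (!ipa_qmi->initial_boot || ipa_qmi->indication_sent);
+ */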
+
+#define IPA_HOST_SERVICE_SVC_ID 0x31
+#define IPA_HOST_SVC_VERS 1
+#define IPA_HOST_SERVICE_INS_ID 1
+
+#define IPA_MODEM_SERVICE_SVC_ID 0x31
+#define IPA_MODEM_SERVICE_INS_ID 2
+#define IPA_MODEM_SVC_VERS 1
+
+#define QMI_INIT_DRIVER_TIMEOUT 60000 /* A minute in milliseconds */
+
+/* Send an INIT_COMPLETE indication message to the modem */
+static void ipa_server_init_complete(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ struct qmi_handle *qmi = &ipa_qmi->server_handle;
+ struct sockaddr_qrtr *sq = &ipa_qmi->modem_sq;
+ struct ipa_init_complete_ind ind = { };
+ int ret;
+
+ ind.status.result = QMI_RESULT_SUCCESS_V01;
+ ind.status.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_indication(qmi, sq, IPA_QMI_INIT_COMPLETE,
+ IPA_QMI_INIT_COMPLETE_IND_SZ,
+ ipa_init_complete_ind_ei, &ind);
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d sending init complete indication\n", ret);
+ else
+ ipa_qmi->indication_sent = true;
+}
+
+/* If requested (and not already sent) send the INIT_COMPLETE indication */
+static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
+{
+ if (!ipa_qmi->indication_requested)
+ return;
+
+ if (ipa_qmi->indication_sent)
+ return;
+
+ ipa_server_init_complete(ipa_qmi);
+}
+
+/* Determine whether everything is ready to start normal operation.
+ * Everything is ready once we know both the IPA driver on the modem
+ * and the IPA microcontroller are ready.
+ *
+ * When the modem boots (or reboots), the handshake sequence starts
+ * with the AP sending the modem an INIT_DRIVER request. Within
+ * that request, the skip_uc_load field will be zero (false) for an
+ * initial boot, non-zero (true) for a subsequent (SSR) boot.
+ */
+static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ int ret;
+
+ /* We aren't ready until the modem and microcontroller are */
+ if (!ipa_qmi->modem_ready || !ipa_qmi->uc_ready)
+ return;
+
+ /* Send the indication message if it was requested */
+ ipa_qmi_indication(ipa_qmi);
+
+ /* The initial boot requires us to send the indication. */
+ if (ipa_qmi->initial_boot) {
+ if (!ipa_qmi->indication_sent)
+ return;
+
+ /* The initial modem boot completed successfully */
+ ipa_qmi->initial_boot = false;
+ }
+
+ /* We're ready. Start up normal operation */
+ ret = ipa_modem_start(ipa);
+ if (ret)
+ dev_err(&ipa->pdev->dev, "error %d starting modem\n", ret);
+}
+
+/* All QMI clients from the modem node are gone (modem shut down or crashed). */
+static void ipa_server_bye(struct qmi_handle *qmi, unsigned int node)
+{
+ struct ipa_qmi *ipa_qmi;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+
+ /* The modem client and server go away at the same time */
+ memset(&ipa_qmi->modem_sq, 0, sizeof(ipa_qmi->modem_sq));
+
+ /* initial_boot doesn't change when modem reboots */
+ /* uc_ready doesn't change when modem reboots */
+ ipa_qmi->modem_ready = false;
+ ipa_qmi->indication_requested = false;
+ ipa_qmi->indication_sent = false;
+}
+
+static struct qmi_ops ipa_server_ops = {
+ .bye = ipa_server_bye,
+};
+
+/* Callback function to handle an INDICATION_REGISTER request message from the
+ * modem. This informs the AP that the modem is now ready to receive the
+ * INIT_COMPLETE indication message.
+ */
+static void ipa_server_indication_register(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ipa_indication_register_rsp rsp = { };
+ struct ipa_qmi *ipa_qmi;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+
+ rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
+ rsp.rsp.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_response(qmi, sq, txn, IPA_QMI_INDICATION_REGISTER,
+ IPA_QMI_INDICATION_REGISTER_RSP_SZ,
+ ipa_indication_register_rsp_ei, &rsp);
+ if (!ret) {
+ ipa_qmi->indication_requested = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "error %d sending register indication response\n", ret);
+ }
+}
+
+/* Respond to a DRIVER_INIT_COMPLETE request message from the modem. */
+static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ipa_driver_init_complete_rsp rsp = { };
+ struct ipa_qmi *ipa_qmi;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+
+ rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
+ rsp.rsp.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_response(qmi, sq, txn, IPA_QMI_DRIVER_INIT_COMPLETE,
+ IPA_QMI_DRIVER_INIT_COMPLETE_RSP_SZ,
+ ipa_driver_init_complete_rsp_ei, &rsp);
+ if (!ret) {
+ ipa_qmi->uc_ready = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "error %d sending init complete response\n", ret);
+ }
+}
+
+/* The server handles two request message types sent by the modem. */
+static struct qmi_msg_handler ipa_server_msg_handlers[] = {
+ {
+ .type = QMI_REQUEST,
+ .msg_id = IPA_QMI_INDICATION_REGISTER,
+ .ei = ipa_indication_register_req_ei,
+ .decoded_size = IPA_QMI_INDICATION_REGISTER_REQ_SZ,
+ .fn = ipa_server_indication_register,
+ },
+ {
+ .type = QMI_REQUEST,
+ .msg_id = IPA_QMI_DRIVER_INIT_COMPLETE,
+ .ei = ipa_driver_init_complete_req_ei,
+ .decoded_size = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ,
+ .fn = ipa_server_driver_init_complete,
+ },
+};
+
+/* Handle an INIT_DRIVER response message from the modem. */
+static void ipa_client_init_driver(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *decoded)
+{
+ txn->result = 0; /* IPA_QMI_INIT_DRIVER request was successful */
+ complete(&txn->completion);
+}
+
+/* The client handles one response message type sent by the modem. */
+static struct qmi_msg_handler ipa_client_msg_handlers[] = {
+ {
+ .type = QMI_RESPONSE,
+ .msg_id = IPA_QMI_INIT_DRIVER,
+ .ei = ipa_init_modem_driver_rsp_ei,
+ .decoded_size = IPA_QMI_INIT_DRIVER_RSP_SZ,
+ .fn = ipa_client_init_driver,
+ },
+};
+
+/* Return a pointer to an init modem driver request structure, which contains
+ * configuration parameters for the modem. The modem may be started multiple
+ * times, but generally these parameters don't change so we can reuse the
+ * request structure once it's initialized. The only exception is the
+ * skip_uc_load field, which will be set only after the microcontroller has
+ * reported it has completed its initialization.
+ */
+static const struct ipa_init_modem_driver_req *
+init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ static struct ipa_init_modem_driver_req req;
+ const struct ipa_mem *mem;
+
+ /* The microcontroller is initialized on the first boot */
+ req.skip_uc_load_valid = 1;
+ req.skip_uc_load = ipa->uc_loaded ? 1 : 0;
+
+ /* We only have to initialize most of it once */
+ if (req.platform_type_valid)
+ return &req;
+
+ req.platform_type_valid = 1;
+ req.platform_type = IPA_QMI_PLATFORM_TYPE_MSM_ANDROID;
+
+ mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
+ if (mem->size) {
+ req.hdr_tbl_info_valid = 1;
+ req.hdr_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.hdr_tbl_info.end = req.hdr_tbl_info.start + mem->size - 1;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V4_ROUTE];
+ req.v4_route_tbl_info_valid = 1;
+ req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.v4_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
+
+ mem = &ipa->mem[IPA_MEM_V6_ROUTE];
+ req.v6_route_tbl_info_valid = 1;
+ req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.v6_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
+
+ mem = &ipa->mem[IPA_MEM_V4_FILTER];
+ req.v4_filter_tbl_start_valid = 1;
+ req.v4_filter_tbl_start = ipa->mem_offset + mem->offset;
+
+ mem = &ipa->mem[IPA_MEM_V6_FILTER];
+ req.v6_filter_tbl_start_valid = 1;
+ req.v6_filter_tbl_start = ipa->mem_offset + mem->offset;
+
+ mem = &ipa->mem[IPA_MEM_MODEM];
+ if (mem->size) {
+ req.modem_mem_info_valid = 1;
+ req.modem_mem_info.start = ipa->mem_offset + mem->offset;
+ req.modem_mem_info.size = mem->size;
+ }
+
+ req.ctrl_comm_dest_end_pt_valid = 1;
+ req.ctrl_comm_dest_end_pt =
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->endpoint_id;
+
+ /* skip_uc_load_valid and skip_uc_load are set above */
+
+ mem = &ipa->mem[IPA_MEM_MODEM_PROC_CTX];
+ if (mem->size) {
+ req.hdr_proc_ctx_tbl_info_valid = 1;
+ req.hdr_proc_ctx_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.hdr_proc_ctx_tbl_info.end =
+ req.hdr_proc_ctx_tbl_info.start + mem->size - 1;
+ }
+
+ /* Nothing to report for the compression table (zip_tbl_info) */
+
+ mem = &ipa->mem[IPA_MEM_V4_ROUTE_HASHED];
+ if (mem->size) {
+ req.v4_hash_route_tbl_info_valid = 1;
+ req.v4_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.v4_hash_route_tbl_info.count =
+ mem->size / IPA_TABLE_ENTRY_SIZE;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
+ if (mem->size) {
+ req.v6_hash_route_tbl_info_valid = 1;
+ req.v6_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.v6_hash_route_tbl_info.count =
+ mem->size / IPA_TABLE_ENTRY_SIZE;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
+ if (mem->size) {
+ req.v4_hash_filter_tbl_start_valid = 1;
+ req.v4_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V6_FILTER_HASHED];
+ if (mem->size) {
+ req.v6_hash_filter_tbl_start_valid = 1;
+ req.v6_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
+ }
+
+ /* The stats fields below are only valid for IPA v4.0 and above */
+
+ if (ipa->version != IPA_VERSION_3_5_1) {
+ mem = &ipa->mem[IPA_MEM_STATS_QUOTA];
+ if (mem->size) {
+ req.hw_stats_quota_base_addr_valid = 1;
+ req.hw_stats_quota_base_addr =
+ ipa->mem_offset + mem->offset;
+ req.hw_stats_quota_size_valid = 1;
+ req.hw_stats_quota_size = mem->size;
+ }
+
+ mem = &ipa->mem[IPA_MEM_STATS_DROP];
+ if (mem->size) {
+ req.hw_stats_drop_base_addr_valid = 1;
+ req.hw_stats_drop_base_addr =
+ ipa->mem_offset + mem->offset;
+ req.hw_stats_drop_size_valid = 1;
+ req.hw_stats_drop_size = mem->size;
+ }
+ }
+
+ return &req;
+}
+
+/* Send an INIT_DRIVER request to the modem, and wait for it to complete. */
+static void ipa_client_init_driver_work(struct work_struct *work)
+{
+ unsigned long timeout = msecs_to_jiffies(QMI_INIT_DRIVER_TIMEOUT);
+ const struct ipa_init_modem_driver_req *req;
+ struct ipa_qmi *ipa_qmi;
+ struct qmi_handle *qmi;
+ struct qmi_txn txn;
+ struct device *dev;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(work, struct ipa_qmi, init_driver_work);
+ qmi = &ipa_qmi->client_handle;
+
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+ dev = &ipa->pdev->dev;
+
+ ret = qmi_txn_init(qmi, &txn, NULL, NULL);
+ if (ret < 0) {
+ dev_err(dev, "error %d preparing init driver request\n", ret);
+ return;
+ }
+
+ /* Send the request, and if successful wait for its response */
+ req = init_modem_driver_req(ipa_qmi);
+ ret = qmi_send_request(qmi, &ipa_qmi->modem_sq, &txn,
+ IPA_QMI_INIT_DRIVER, IPA_QMI_INIT_DRIVER_REQ_SZ,
+ ipa_init_modem_driver_req_ei, req);
+ if (ret)
+ dev_err(dev, "error %d sending init driver request\n", ret);
+ else if ((ret = qmi_txn_wait(&txn, timeout)))
+ dev_err(dev, "error %d awaiting init driver response\n", ret);
+
+ if (!ret) {
+ ipa_qmi->modem_ready = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ /* If any error occurs we need to cancel the transaction */
+ qmi_txn_cancel(&txn);
+ }
+}
+
+/* The modem server is now available. We will send an INIT_DRIVER request
+ * to the modem, but can't wait for it to complete in this callback thread.
+ * Schedule a worker on the global workqueue to do that for us.
+ */
+static int
+ipa_client_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct ipa_qmi *ipa_qmi;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, client_handle);
+
+ ipa_qmi->modem_sq.sq_family = AF_QIPCRTR;
+ ipa_qmi->modem_sq.sq_node = svc->node;
+ ipa_qmi->modem_sq.sq_port = svc->port;
+
+ schedule_work(&ipa_qmi->init_driver_work);
+
+ return 0;
+}
+
+static struct qmi_ops ipa_client_ops = {
+ .new_server = ipa_client_new_server,
+};
+
+/* This is called by ipa_setup(). We can be informed via remoteproc that
+ * the modem has shut down, in which case this function will be called
+ * again to prepare for it coming back up again.
+ */
+int ipa_qmi_setup(struct ipa *ipa)
+{
+ struct ipa_qmi *ipa_qmi = &ipa->qmi;
+ int ret;
+
+ ipa_qmi->initial_boot = true;
+
+ /* The server handle is used to handle the DRIVER_INIT_COMPLETE
+ * request on the first modem boot. It also receives the
+ * INDICATION_REGISTER request on the first boot and (optionally)
+ * subsequent boots. The INIT_COMPLETE indication message is
+ * sent over the server handle if requested.
+ */
+ ret = qmi_handle_init(&ipa_qmi->server_handle,
+ IPA_QMI_SERVER_MAX_RCV_SZ, &ipa_server_ops,
+ ipa_server_msg_handlers);
+ if (ret)
+ return ret;
+
+ ret = qmi_add_server(&ipa_qmi->server_handle, IPA_HOST_SERVICE_SVC_ID,
+ IPA_HOST_SVC_VERS, IPA_HOST_SERVICE_INS_ID);
+ if (ret)
+ goto err_server_handle_release;
+
+ /* The client handle is only used for sending an INIT_DRIVER request
+ * to the modem, and receiving its response message.
+ */
+ ret = qmi_handle_init(&ipa_qmi->client_handle,
+ IPA_QMI_CLIENT_MAX_RCV_SZ, &ipa_client_ops,
+ ipa_client_msg_handlers);
+ if (ret)
+ goto err_server_handle_release;
+
+ /* We need this ready before the service lookup is added */
+ INIT_WORK(&ipa_qmi->init_driver_work, ipa_client_init_driver_work);
+
+ ret = qmi_add_lookup(&ipa_qmi->client_handle, IPA_MODEM_SERVICE_SVC_ID,
+ IPA_MODEM_SVC_VERS, IPA_MODEM_SERVICE_INS_ID);
+ if (ret)
+ goto err_client_handle_release;
+
+ return 0;
+
+err_client_handle_release:
+ /* Releasing the handle also removes registered lookups */
+ qmi_handle_release(&ipa_qmi->client_handle);
+ memset(&ipa_qmi->client_handle, 0, sizeof(ipa_qmi->client_handle));
+err_server_handle_release:
+ /* Releasing the handle also removes registered services */
+ qmi_handle_release(&ipa_qmi->server_handle);
+ memset(&ipa_qmi->server_handle, 0, sizeof(ipa_qmi->server_handle));
+
+ return ret;
+}
+
+void ipa_qmi_teardown(struct ipa *ipa)
+{
+ cancel_work_sync(&ipa->qmi.init_driver_work);
+
+ qmi_handle_release(&ipa->qmi.client_handle);
+ memset(&ipa->qmi.client_handle, 0, sizeof(ipa->qmi.client_handle));
+
+ qmi_handle_release(&ipa->qmi.server_handle);
+ memset(&ipa->qmi.server_handle, 0, sizeof(ipa->qmi.server_handle));
+}
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
new file mode 100644
index 000000000000..3993687593d0
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_QMI_H_
+#define _IPA_QMI_H_
+
+#include <linux/types.h>
+#include <linux/soc/qcom/qmi.h>
+
+struct ipa;
+
+/**
+ * struct ipa_qmi - QMI state associated with an IPA
+ * @client_handle: used to send QMI requests to the modem
+ * @server_handle: used to handle QMI requests from the modem
+ * @modem_sq: QRTR address of the modem QMI server
+ * @init_driver_work: work structure used to send the INIT_DRIVER request
+ * @initial_boot: whether the next modem boot is its initial boot
+ * @uc_ready: whether the IPA microcontroller has reported it is ready
+ * @modem_ready: whether the modem IPA driver has reported it is ready
+ * @indication_requested: modem has requested an INIT_COMPLETE indication
+ * @indication_sent: the INIT_COMPLETE indication has been sent
+ */
+struct ipa_qmi {
+ struct qmi_handle client_handle;
+ struct qmi_handle server_handle;
+
+ /* Information used for the client handle */
+ struct sockaddr_qrtr modem_sq;
+ struct work_struct init_driver_work;
+
+ /* Flags used in negotiating readiness */
+ bool initial_boot;
+ bool uc_ready;
+ bool modem_ready;
+ bool indication_requested;
+ bool indication_sent;
+};
+
+int ipa_qmi_setup(struct ipa *ipa);
+void ipa_qmi_teardown(struct ipa *ipa);
+
+#endif /* !_IPA_QMI_H_ */
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
new file mode 100644
index 000000000000..03a1d0e55964
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#include <linux/stddef.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "ipa_qmi_msg.h"
+
+/* QMI message structure definition for struct ipa_indication_register_req */
+struct qmi_elem_info ipa_indication_register_req_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ master_driver_init_complete_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_indication_register_req,
+ master_driver_init_complete_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ master_driver_init_complete),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_indication_register_req,
+ master_driver_init_complete),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ data_usage_quota_reached_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ data_usage_quota_reached_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ data_usage_quota_reached),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ data_usage_quota_reached),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
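+
+/* A general note on the element arrays in this file: a QMI_OPT_FLAG
+ * element describes a "_valid" flag that is not itself transmitted;
+ * the value element(s) that follow it with the same .tlv_type are
+ * encoded into the wire message only when that flag is nonzero.
+ */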
+
+/* QMI message structure definition for struct ipa_indication_register_rsp */
+struct qmi_elem_info ipa_indication_register_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_indication_register_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_driver_init_complete_req */
+struct qmi_elem_info ipa_driver_init_complete_req_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_driver_init_complete_req,
+ status),
+ .tlv_type = 0x01,
+ .offset = offsetof(struct ipa_driver_init_complete_req,
+ status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_driver_init_complete_rsp */
+struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_driver_init_complete_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_driver_init_complete_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_init_complete_ind */
+struct qmi_elem_info ipa_init_complete_ind_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_complete_ind,
+ status),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_init_complete_ind,
+ status),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_bounds */
+struct qmi_elem_info ipa_mem_bounds_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_bounds, start),
+ .offset = offsetof(struct ipa_mem_bounds, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_bounds, end),
+ .offset = offsetof(struct ipa_mem_bounds, end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_array */
+struct qmi_elem_info ipa_mem_array_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_array, start),
+ .offset = offsetof(struct ipa_mem_array, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_array, count),
+ .offset = offsetof(struct ipa_mem_array, count),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_range */
+struct qmi_elem_info ipa_mem_range_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_range, start),
+ .offset = offsetof(struct ipa_mem_range, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_range, size),
+ .offset = offsetof(struct ipa_mem_range, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_init_modem_driver_req */
+struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ platform_type_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ platform_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ platform_type),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ platform_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_tbl_info_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_tbl_info),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info_valid),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info_valid),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start_valid),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start_valid),
+ .tlv_type = 0x15,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start),
+ .tlv_type = 0x15,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ modem_mem_info_valid),
+ .tlv_type = 0x16,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ modem_mem_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ modem_mem_info),
+ .tlv_type = 0x16,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ modem_mem_info),
+ .ei_array = ipa_mem_range_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt_valid),
+ .tlv_type = 0x17,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt),
+ .tlv_type = 0x17,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ skip_uc_load_valid),
+ .tlv_type = 0x18,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ skip_uc_load_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ skip_uc_load),
+ .tlv_type = 0x18,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ skip_uc_load),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info_valid),
+ .tlv_type = 0x19,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info),
+ .tlv_type = 0x19,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ zip_tbl_info_valid),
+ .tlv_type = 0x1a,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ zip_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ zip_tbl_info),
+ .tlv_type = 0x1a,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ zip_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info_valid),
+ .tlv_type = 0x1b,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info),
+ .tlv_type = 0x1b,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info_valid),
+ .tlv_type = 0x1c,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info),
+ .tlv_type = 0x1c,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start_valid),
+ .tlv_type = 0x1d,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start),
+ .tlv_type = 0x1d,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start_valid),
+ .tlv_type = 0x1e,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start),
+ .tlv_type = 0x1e,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr_valid),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size_valid),
+ .tlv_type = 0x20,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size),
+ .tlv_type = 0x20,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr_valid),
+ .tlv_type = 0x21,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr),
+ .tlv_type = 0x21,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size_valid),
+ .tlv_type = 0x22,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size),
+ .tlv_type = 0x22,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_init_modem_driver_rsp */
+struct qmi_elem_info ipa_init_modem_driver_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ default_end_pt_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ default_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ default_end_pt),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ default_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending_valid),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
new file mode 100644
index 000000000000..cfac456cea0c
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_QMI_MSG_H_
+#define _IPA_QMI_MSG_H_
+
+/* === Only "ipa_qmi.c" and "ipa_qmi_msg.c" should include this file === */
+
+#include <linux/types.h>
+#include <linux/soc/qcom/qmi.h>
+
+/* Request/response/indication QMI message ids used for IPA. Receiving
+ * end issues a response for requests; indications require no response.
+ */
+#define IPA_QMI_INDICATION_REGISTER 0x20 /* modem -> AP request */
+#define IPA_QMI_INIT_DRIVER 0x21 /* AP -> modem request */
+#define IPA_QMI_INIT_COMPLETE 0x22 /* AP -> modem indication */
+#define IPA_QMI_DRIVER_INIT_COMPLETE 0x35 /* modem -> AP request */
+
+/* The maximum size required for message types. These sizes include
+ * the message data, along with type (1 byte) and length (2 byte)
+ * information for each field. The qmi_send_*() interfaces require
+ * the message size to be provided.
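+ *
+ * As a worked example, the DRIVER_INIT_COMPLETE request carries a single
+ * u8 field, so its maximum size is 1 + 2 + 1 = 4 bytes; the
+ * INDICATION_REGISTER request carries three optional u8 fields, giving
+ * 3 * (1 + 2 + 1) = 12 bytes.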
+ */
+#define IPA_QMI_INDICATION_REGISTER_REQ_SZ 12 /* -> server handle */
+#define IPA_QMI_INDICATION_REGISTER_RSP_SZ 7 /* <- server handle */
+#define IPA_QMI_INIT_DRIVER_REQ_SZ 162 /* client handle -> */
+#define IPA_QMI_INIT_DRIVER_RSP_SZ 25 /* client handle <- */
+#define IPA_QMI_INIT_COMPLETE_IND_SZ 7 /* <- server handle */
+#define IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ 4 /* -> server handle */
+#define IPA_QMI_DRIVER_INIT_COMPLETE_RSP_SZ 7 /* <- server handle */
+
+/* Maximum size of messages we expect the AP to receive (max of above) */
+#define IPA_QMI_SERVER_MAX_RCV_SZ 8
+#define IPA_QMI_CLIENT_MAX_RCV_SZ 25
+
+/* Request message for the IPA_QMI_INDICATION_REGISTER request */
+struct ipa_indication_register_req {
+ u8 master_driver_init_complete_valid;
+ u8 master_driver_init_complete;
+ u8 data_usage_quota_reached_valid;
+ u8 data_usage_quota_reached;
+ u8 ipa_mhi_ready_ind_valid;
+ u8 ipa_mhi_ready_ind;
+};
+
+/* The response to an IPA_QMI_INDICATION_REGISTER request consists only of
+ * a standard QMI response.
+ */
+struct ipa_indication_register_rsp {
+ struct qmi_response_type_v01 rsp;
+};
+
+/* Request message for the IPA_QMI_DRIVER_INIT_COMPLETE request */
+struct ipa_driver_init_complete_req {
+ u8 status;
+};
+
+/* The response to an IPA_QMI_DRIVER_INIT_COMPLETE request consists only
+ * of a standard QMI response.
+ */
+struct ipa_driver_init_complete_rsp {
+ struct qmi_response_type_v01 rsp;
+};
+
+/* The message for the IPA_QMI_INIT_COMPLETE indication consists
+ * only of a standard QMI response.
+ */
+struct ipa_init_complete_ind {
+ struct qmi_response_type_v01 status;
+};
+
+/* The AP tells the modem its platform type. We assume Android. */
+enum ipa_platform_type {
+ IPA_QMI_PLATFORM_TYPE_INVALID = 0, /* Invalid */
+ IPA_QMI_PLATFORM_TYPE_TN = 1, /* Data card */
+ IPA_QMI_PLATFORM_TYPE_LE = 2, /* Data router */
+ IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 3, /* Android MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 4, /* Windows MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */
+};
+
+/* This defines the start and end offset of a range of memory. Both
+ * fields are offsets relative to the start of IPA shared memory.
+ * The end value is the last addressable byte *within* the range.
+ */
+struct ipa_mem_bounds {
+ u32 start;
+ u32 end;
+};
+
+/* This defines the location and size of an array. The start value
+ * is an offset relative to the start of IPA shared memory. The
+ * size of the array is implied by the number of entries (the entry
+ * size is assumed to be known).
+ */
+struct ipa_mem_array {
+ u32 start;
+ u32 count;
+};
+
+/* This defines the location and size of a range of memory. The
+ * start is an offset relative to the start of IPA shared memory.
+ * This differs from the ipa_mem_bounds structure in that the size
+ * (in bytes) of the memory region is specified rather than the
+ * offset of its last byte.
+ */
+struct ipa_mem_range {
+ u32 start;
+ u32 size;
+};
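+
+/* For illustration: a 256-byte (0x100) region at offset 0x400 would be
+ * described by { .start = 0x400, .end = 0x4ff } as an ipa_mem_bounds,
+ * or by { .start = 0x400, .size = 0x100 } as an ipa_mem_range; if it
+ * held 16-byte entries, { .start = 0x400, .count = 16 } would describe
+ * it as an ipa_mem_array.
+ */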
+
+/* The message for the IPA_QMI_INIT_DRIVER request contains information
+ * from the AP that affects modem initialization.
+ */
+struct ipa_init_modem_driver_req {
+ u8 platform_type_valid;
+ u32 platform_type; /* enum ipa_platform_type */
+
+ /* Modem header table information. This defines the IPA shared
+ * memory in which the modem may insert header table entries.
+ */
+ u8 hdr_tbl_info_valid;
+ struct ipa_mem_bounds hdr_tbl_info;
+
+ /* Routing table information. These define the location and size of
+ * non-hashable IPv4 and IPv6 route tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_route_tbl_info_valid;
+ struct ipa_mem_array v4_route_tbl_info;
+ u8 v6_route_tbl_info_valid;
+ struct ipa_mem_array v6_route_tbl_info;
+
+ /* Filter table information. These define the location of the
+ * non-hashable IPv4 and IPv6 filter tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_filter_tbl_start_valid;
+ u32 v4_filter_tbl_start;
+ u8 v6_filter_tbl_start_valid;
+ u32 v6_filter_tbl_start;
+
+ /* Modem memory information. This defines the location and
+ * size of memory available for the modem to use.
+ */
+ u8 modem_mem_info_valid;
+ struct ipa_mem_range modem_mem_info;
+
+ /* This defines the destination endpoint on the AP to which
+ * the modem driver can send control commands. Must be less
+ * than ipa_endpoint_max().
+ */
+ u8 ctrl_comm_dest_end_pt_valid;
+ u32 ctrl_comm_dest_end_pt;
+
+ /* This defines whether the modem should load the microcontroller
+ * or not. It is unnecessary to reload it if the modem is being
+ * restarted.
+ *
+ * NOTE: this field is named "is_ssr_bootup" elsewhere.
+ */
+ u8 skip_uc_load_valid;
+ u8 skip_uc_load;
+
+ /* Processing context memory information. This defines the memory in
+ * which the modem may insert header processing context table entries.
+ */
+ u8 hdr_proc_ctx_tbl_info_valid;
+ struct ipa_mem_bounds hdr_proc_ctx_tbl_info;
+
+ /* Compression command memory information. This defines the memory
+ * in which the modem may insert compression/decompression commands.
+ */
+ u8 zip_tbl_info_valid;
+ struct ipa_mem_bounds zip_tbl_info;
+
+ /* Routing table information. These define the location and size
+ * of hashable IPv4 and IPv6 route tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_hash_route_tbl_info_valid;
+ struct ipa_mem_array v4_hash_route_tbl_info;
+ u8 v6_hash_route_tbl_info_valid;
+ struct ipa_mem_array v6_hash_route_tbl_info;
+
+ /* Filter table information. These define the location of the
+ * hashable IPv4 and IPv6 filter tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_hash_filter_tbl_start_valid;
+ u32 v4_hash_filter_tbl_start;
+ u8 v6_hash_filter_tbl_start_valid;
+ u32 v6_hash_filter_tbl_start;
+
+ /* Statistics information. These define the locations of the
+ * first and last statistics sub-regions. (IPA v4.0 and above)
+ */
+ u8 hw_stats_quota_base_addr_valid;
+ u32 hw_stats_quota_base_addr;
+ u8 hw_stats_quota_size_valid;
+ u32 hw_stats_quota_size;
+ u8 hw_stats_drop_base_addr_valid;
+ u32 hw_stats_drop_base_addr;
+ u8 hw_stats_drop_size_valid;
+ u32 hw_stats_drop_size;
+};
+
+/* The response to an IPA_QMI_INIT_DRIVER request begins with a standard
+ * QMI response, but contains other information as well. Currently we
+ * simply wait for the INIT_DRIVER transaction to complete and
+ * ignore any other data that might be returned.
+ */
+struct ipa_init_modem_driver_rsp {
+ struct qmi_response_type_v01 rsp;
+
+ /* This defines the destination endpoint on the modem to which
+ * the AP driver can send control commands. Must be less than
+ * ipa_endpoint_max().
+ */
+ u8 ctrl_comm_dest_end_pt_valid;
+ u32 ctrl_comm_dest_end_pt;
+
+ /* This defines the default endpoint. The AP driver is not
+ * required to configure the hardware with this value. Must
+ * be less than ipa_endpoint_max().
+ */
+ u8 default_end_pt_valid;
+ u32 default_end_pt;
+
+ /* This defines whether a second handshake is required to complete
+ * initialization.
+ */
+ u8 modem_driver_init_pending_valid;
+ u8 modem_driver_init_pending;
+};
+
+/* Message structure definitions defined in "ipa_qmi_msg.c" */
+extern struct qmi_elem_info ipa_indication_register_req_ei[];
+extern struct qmi_elem_info ipa_indication_register_rsp_ei[];
+extern struct qmi_elem_info ipa_driver_init_complete_req_ei[];
+extern struct qmi_elem_info ipa_driver_init_complete_rsp_ei[];
+extern struct qmi_elem_info ipa_init_complete_ind_ei[];
+extern struct qmi_elem_info ipa_mem_bounds_ei[];
+extern struct qmi_elem_info ipa_mem_array_ei[];
+extern struct qmi_elem_info ipa_mem_range_ei[];
+extern struct qmi_elem_info ipa_init_modem_driver_req_ei[];
+extern struct qmi_elem_info ipa_init_modem_driver_rsp_ei[];
+
+#endif /* !_IPA_QMI_MSG_H_ */
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
new file mode 100644
index 000000000000..e6147a1cd787
--- /dev/null
+++ b/drivers/net/ipa/ipa_reg.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/io.h>
+
+#include "ipa.h"
+#include "ipa_reg.h"
+
+int ipa_reg_init(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct resource *res;
+
+ /* Setup IPA register memory */
+ res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
+ "ipa-reg");
+ if (!res) {
+ dev_err(dev, "DT error getting \"ipa-reg\" memory property\n");
+ return -ENODEV;
+ }
+
+ ipa->reg_virt = ioremap(res->start, resource_size(res));
+ if (!ipa->reg_virt) {
+ dev_err(dev, "unable to remap \"ipa-reg\" memory\n");
+ return -ENOMEM;
+ }
+ ipa->reg_addr = res->start;
+
+ return 0;
+}
+
+void ipa_reg_exit(struct ipa *ipa)
+{
+ iounmap(ipa->reg_virt);
+}
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
new file mode 100644
index 000000000000..3b8106aa277a
--- /dev/null
+++ b/drivers/net/ipa/ipa_reg.h
@@ -0,0 +1,476 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_REG_H_
+#define _IPA_REG_H_
+
+#include <linux/bitfield.h>
+
+#include "ipa_version.h"
+
+struct ipa;
+
+/**
+ * DOC: IPA Registers
+ *
+ * IPA registers are located within the "ipa-reg" address space defined by
+ * Device Tree. The offset of each register within that space is specified
+ * by symbols defined below. The address space is mapped to virtual memory
+ * space in ipa_mem_init(). All IPA registers are 32 bits wide.
+ *
+ * Certain register types are duplicated for a number of instances of
+ * something. For example, each IPA endpoint has a set of registers
+ * defining its configuration. The offset to an endpoint's set of registers
+ * is computed from a "base" offset, plus the endpoint's ID multiplied
+ * by a "stride" value for the register. For such registers, the offset is
+ * computed by a function-like macro that takes a parameter used in the
+ * computation.
+ *
+ * Some register offsets depend on execution environment. For these an "ee"
+ * parameter is supplied to the offset macro. The "ee" value is a member of
+ * the gsi_ee enumerated type.
+ *
+ * The offset of a register dependent on endpoint id is computed by a macro
+ * that is supplied a parameter "ep". The "ep" value is assumed to be less
+ * than the maximum endpoint value for the current hardware, which will
+ * not exceed IPA_ENDPOINT_MAX.
+ *
+ * The offset of registers related to filter and route tables is computed
+ * by a macro that is supplied a parameter "er". The "er" represents an
+ * endpoint ID for filters, or a route ID for routes. For filters, the
+ * endpoint ID must be less than IPA_ENDPOINT_MAX, but is further restricted
+ * because not all endpoints support filtering. For routes, the route ID
+ * must be less than IPA_ROUTE_MAX.
+ *
+ * The offset of registers related to resource types is computed by a macro
+ * that is supplied a parameter "rt". The "rt" represents a resource type,
+ * which is a member of the ipa_resource_type_src enumerated type for
+ * source endpoint resources or the ipa_resource_type_dst enumerated type
+ * for destination endpoint resources.
+ *
+ * Some registers encode multiple fields within them. For these, each field
+ * has a symbol below defining a field mask that encodes both the position
+ * and width of the field within its register.
+ *
+ * In some cases, different versions of IPA hardware use different offset or
+ * field mask values. In such cases an inline function (taking the IPA
+ * version as its argument) is used rather than a macro to define the
+ * offset or field mask to use.
+ *
+ * Finally, some registers hold bitmasks representing endpoints. In such
+ * cases the @available field in the @ipa structure defines the "full" set
+ * of valid bits for the register.
+ */
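+
+/* To make the conventions above concrete (illustrative values only):
+ * the per-endpoint ENDP_INIT_CTRL register defined below is at offset
+ * 0x00000800 + 0x0070 * (ep), so endpoint 5 lives at offset 0x00000a30.
+ * The field mask symbols are suitable for the <linux/bitfield.h>
+ * helpers; u32_encode_bits(1, ENDP_DELAY_FMASK), for example, builds
+ * the value for the DELAY field of that register.
+ */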
+
+#define IPA_REG_ENABLED_PIPES_OFFSET 0x00000038
+
+#define IPA_REG_COMP_CFG_OFFSET 0x0000003c
+#define ENABLE_FMASK GENMASK(0, 0)
+#define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1)
+#define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2)
+#define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3)
+#define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4)
+#define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5)
+#define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6)
+#define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7)
+#define GSI_MULTI_INORDER_WR_DIS_FMASK GENMASK(8, 8)
+#define GEN_QMB_0_MULTI_INORDER_RD_DIS_FMASK GENMASK(9, 9)
+#define GEN_QMB_1_MULTI_INORDER_RD_DIS_FMASK GENMASK(10, 10)
+#define GEN_QMB_0_MULTI_INORDER_WR_DIS_FMASK GENMASK(11, 11)
+#define GEN_QMB_1_MULTI_INORDER_WR_DIS_FMASK GENMASK(12, 12)
+#define GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS_FMASK GENMASK(13, 13)
+#define GSI_SNOC_CNOC_LOOP_PROT_DISABLE_FMASK GENMASK(14, 14)
+#define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15)
+#define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16)
+#define IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_FMASK GENMASK(20, 17)
+
+#define IPA_REG_CLKON_CFG_OFFSET 0x00000044
+#define RX_FMASK GENMASK(0, 0)
+#define PROC_FMASK GENMASK(1, 1)
+#define TX_WRAPPER_FMASK GENMASK(2, 2)
+#define MISC_FMASK GENMASK(3, 3)
+#define RAM_ARB_FMASK GENMASK(4, 4)
+#define FTCH_HPS_FMASK GENMASK(5, 5)
+#define FTCH_DPS_FMASK GENMASK(6, 6)
+#define HPS_FMASK GENMASK(7, 7)
+#define DPS_FMASK GENMASK(8, 8)
+#define RX_HPS_CMDQS_FMASK GENMASK(9, 9)
+#define HPS_DPS_CMDQS_FMASK GENMASK(10, 10)
+#define DPS_TX_CMDQS_FMASK GENMASK(11, 11)
+#define RSRC_MNGR_FMASK GENMASK(12, 12)
+#define CTX_HANDLER_FMASK GENMASK(13, 13)
+#define ACK_MNGR_FMASK GENMASK(14, 14)
+#define D_DCPH_FMASK GENMASK(15, 15)
+#define H_DCPH_FMASK GENMASK(16, 16)
+#define DCMP_FMASK GENMASK(17, 17)
+#define NTF_TX_CMDQS_FMASK GENMASK(18, 18)
+#define TX_0_FMASK GENMASK(19, 19)
+#define TX_1_FMASK GENMASK(20, 20)
+#define FNR_FMASK GENMASK(21, 21)
+#define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22)
+#define AGGR_WRAPPER_FMASK GENMASK(23, 23)
+#define RAM_SLAVEWAY_FMASK GENMASK(24, 24)
+#define QMB_FMASK GENMASK(25, 25)
+#define WEIGHT_ARB_FMASK GENMASK(26, 26)
+#define GSI_IF_FMASK GENMASK(27, 27)
+#define GLOBAL_FMASK GENMASK(28, 28)
+#define GLOBAL_2X_CLK_FMASK GENMASK(29, 29)
+
+#define IPA_REG_ROUTE_OFFSET 0x00000048
+#define ROUTE_DIS_FMASK GENMASK(0, 0)
+#define ROUTE_DEF_PIPE_FMASK GENMASK(5, 1)
+#define ROUTE_DEF_HDR_TABLE_FMASK GENMASK(6, 6)
+#define ROUTE_DEF_HDR_OFST_FMASK GENMASK(16, 7)
+#define ROUTE_FRAG_DEF_PIPE_FMASK GENMASK(21, 17)
+#define ROUTE_DEF_RETAIN_HDR_FMASK GENMASK(24, 24)
+
+#define IPA_REG_SHARED_MEM_SIZE_OFFSET 0x00000054
+#define SHARED_MEM_SIZE_FMASK GENMASK(15, 0)
+#define SHARED_MEM_BADDR_FMASK GENMASK(31, 16)
+
+#define IPA_REG_QSB_MAX_WRITES_OFFSET 0x00000074
+#define GEN_QMB_0_MAX_WRITES_FMASK GENMASK(3, 0)
+#define GEN_QMB_1_MAX_WRITES_FMASK GENMASK(7, 4)
+
+#define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078
+#define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0)
+#define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4)
+/* The next two fields are present for IPA v4.0 and above */
+#define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16)
+#define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24)
+
+static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return 0x0000010c;
+
+ return 0x000000b4;
+}
+/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
+
+/* The next register is present for IPA v4.2 and above */
+#define IPA_REG_FILT_ROUT_HASH_EN_OFFSET 0x00000148
+#define IPV6_ROUTER_HASH_EN GENMASK(0, 0)
+#define IPV6_FILTER_HASH_EN GENMASK(4, 4)
+#define IPV4_ROUTER_HASH_EN GENMASK(8, 8)
+#define IPV4_FILTER_HASH_EN GENMASK(12, 12)
+
+static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return 0x0000090;
+
+ return 0x000014c;
+}
+
+#define IPV6_ROUTER_HASH_FLUSH GENMASK(0, 0)
+#define IPV6_FILTER_HASH_FLUSH GENMASK(4, 4)
+#define IPV4_ROUTER_HASH_FLUSH GENMASK(8, 8)
+#define IPV4_FILTER_HASH_FLUSH GENMASK(12, 12)
+
+#define IPA_REG_BCR_OFFSET 0x000001d0
+#define BCR_CMDQ_L_LACK_ONE_ENTRY BIT(0)
+#define BCR_TX_NOT_USING_BRESP BIT(1)
+#define BCR_SUSPEND_L2_IRQ BIT(3)
+#define BCR_HOLB_DROP_L2_IRQ BIT(4)
+#define BCR_DUAL_TX BIT(5)
+
+/* Backward compatibility register value to use for each version */
+static inline u32 ipa_reg_bcr_val(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_TX_NOT_USING_BRESP |
+ BCR_SUSPEND_L2_IRQ | BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+
+ if (version == IPA_VERSION_4_0 || version == IPA_VERSION_4_1)
+ return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_SUSPEND_L2_IRQ |
+ BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+
+ return 0x00000000;
+}
+
+#define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8
+
+#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
+/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
+
+#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
+#define AGGR_GRANULARITY GENMASK(8, 4)
+/* Compute the value to use in the AGGR_GRANULARITY field representing
+ * the given number of microseconds (up to 1 millisecond).
+ * x = (32 * usec) / 1000 - 1
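+ * For example, usec = 500 gives (32 * 500) / 1000 - 1 = 15.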
+ */
+static inline u32 ipa_aggr_granularity_val(u32 microseconds)
+{
+ /* assert(microseconds >= 16); */
+ /* assert(microseconds <= 1015); */
+
+ return DIV_ROUND_CLOSEST(32 * microseconds, 1000) - 1;
+}
+
+#define IPA_REG_TX_CFG_OFFSET 0x000001fc
+/* The first three fields are present for IPA v3.5.1 only */
+#define TX0_PREFETCH_DISABLE GENMASK(0, 0)
+#define TX1_PREFETCH_DISABLE GENMASK(1, 1)
+#define PREFETCH_ALMOST_EMPTY_SIZE GENMASK(4, 2)
+/* The next fields are present for IPA v4.0 and above */
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX0 GENMASK(5, 2)
+#define DMAW_SCND_OUTSD_PRED_THRESHOLD GENMASK(9, 6)
+#define DMAW_SCND_OUTSD_PRED_EN GENMASK(10, 10)
+#define DMAW_MAX_BEATS_256_DIS GENMASK(11, 11)
+#define PA_MASK_EN GENMASK(12, 12)
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX1 GENMASK(16, 13)
+/* The last two fields are present for IPA v4.2 and above */
+#define SSPND_PA_NO_START_STATE GENMASK(18, 18)
+#define SSPND_PA_NO_BQ_STATE GENMASK(19, 19)
+
+#define IPA_REG_FLAVOR_0_OFFSET 0x00000210
+#define BAM_MAX_PIPES_FMASK GENMASK(4, 0)
+#define BAM_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
+#define BAM_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
+#define BAM_PROD_LOWEST_FMASK GENMASK(27, 24)
+
+static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_4_2)
+ return 0x00000240;
+
+ return 0x00000220;
+}
+
+#define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0)
+#define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16)
+
+#define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000400 + 0x0020 * (rt))
+#define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000404 + 0x0020 * (rt))
+#define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000408 + 0x0020 * (rt))
+#define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000500 + 0x0020 * (rt))
+#define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000504 + 0x0020 * (rt))
+#define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000508 + 0x0020 * (rt))
+#define X_MIN_LIM_FMASK GENMASK(5, 0)
+#define X_MAX_LIM_FMASK GENMASK(13, 8)
+#define Y_MIN_LIM_FMASK GENMASK(21, 16)
+#define Y_MAX_LIM_FMASK GENMASK(29, 24)
+
+#define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \
+ (0x00000800 + 0x0070 * (ep))
+#define ENDP_SUSPEND_FMASK GENMASK(0, 0)
+#define ENDP_DELAY_FMASK GENMASK(1, 1)
+
+#define IPA_REG_ENDP_INIT_CFG_N_OFFSET(ep) \
+ (0x00000808 + 0x0070 * (ep))
+#define FRAG_OFFLOAD_EN_FMASK GENMASK(0, 0)
+#define CS_OFFLOAD_EN_FMASK GENMASK(2, 1)
+#define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3)
+#define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8)
+
+#define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \
+ (0x00000810 + 0x0070 * (ep))
+#define HDR_LEN_FMASK GENMASK(5, 0)
+#define HDR_OFST_METADATA_VALID_FMASK GENMASK(6, 6)
+#define HDR_OFST_METADATA_FMASK GENMASK(12, 7)
+#define HDR_ADDITIONAL_CONST_LEN_FMASK GENMASK(18, 13)
+#define HDR_OFST_PKT_SIZE_VALID_FMASK GENMASK(19, 19)
+#define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20)
+#define HDR_A5_MUX_FMASK GENMASK(26, 26)
+#define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27)
+#define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28)
+
+#define IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(ep) \
+ (0x00000814 + 0x0070 * (ep))
+#define HDR_ENDIANNESS_FMASK GENMASK(0, 0)
+#define HDR_TOTAL_LEN_OR_PAD_VALID_FMASK GENMASK(1, 1)
+#define HDR_TOTAL_LEN_OR_PAD_FMASK GENMASK(2, 2)
+#define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3)
+#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
+#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
+
+#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(ep) \
+ (0x00000818 + 0x0070 * (ep))
+
+#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(ep) \
+ (0x00000820 + 0x0070 * (ep))
+#define MODE_FMASK GENMASK(2, 0)
+#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
+#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
+#define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28)
+#define PAD_EN_FMASK GENMASK(29, 29)
+#define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30)
+
+#define IPA_REG_ENDP_INIT_AGGR_N_OFFSET(ep) \
+ (0x00000824 + 0x0070 * (ep))
+#define AGGR_EN_FMASK GENMASK(1, 0)
+#define AGGR_TYPE_FMASK GENMASK(4, 2)
+#define AGGR_BYTE_LIMIT_FMASK GENMASK(9, 5)
+#define AGGR_TIME_LIMIT_FMASK GENMASK(14, 10)
+#define AGGR_PKT_LIMIT_FMASK GENMASK(20, 15)
+#define AGGR_SW_EOF_ACTIVE_FMASK GENMASK(21, 21)
+#define AGGR_FORCE_CLOSE_FMASK GENMASK(22, 22)
+#define AGGR_HARD_BYTE_LIMIT_ENABLE_FMASK GENMASK(24, 24)
+
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(ep) \
+ (0x0000082c + 0x0070 * (ep))
+#define HOL_BLOCK_EN_FMASK GENMASK(0, 0)
+
+/* The next register is valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(ep) \
+ (0x00000830 + 0x0070 * (ep))
+/* The next fields are present for IPA v4.2 only */
+#define BASE_VALUE_FMASK GENMASK(4, 0)
+#define SCALE_FMASK GENMASK(12, 8)
+
+#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(ep) \
+ (0x00000834 + 0x0070 * (ep))
+#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
+#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
+#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
+#define MAX_PACKET_LEN_FMASK GENMASK(31, 16)
+
+#define IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep) \
+ (0x00000838 + 0x0070 * (ep))
+#define RSRC_GRP_FMASK GENMASK(1, 0)
+
+#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(ep) \
+ (0x0000083c + 0x0070 * (ep))
+#define HPS_SEQ_TYPE_FMASK GENMASK(3, 0)
+#define DPS_SEQ_TYPE_FMASK GENMASK(7, 4)
+#define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8)
+#define DPS_REP_SEQ_TYPE_FMASK GENMASK(15, 12)
+
+#define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \
+ (0x00000840 + 0x0070 * (ep))
+#define STATUS_EN_FMASK GENMASK(0, 0)
+#define STATUS_ENDP_FMASK GENMASK(5, 1)
+#define STATUS_LOCATION_FMASK GENMASK(8, 8)
+/* The next field is present for IPA v4.0 and above */
+#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
+
+/* "er" is either an endpoint id (for filters) or a route id (for routes) */
+#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
+ (0x0000085c + 0x0070 * (er))
+#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
+#define FILTER_HASH_MSK_SRC_IP_FMASK GENMASK(1, 1)
+#define FILTER_HASH_MSK_DST_IP_FMASK GENMASK(2, 2)
+#define FILTER_HASH_MSK_SRC_PORT_FMASK GENMASK(3, 3)
+#define FILTER_HASH_MSK_DST_PORT_FMASK GENMASK(4, 4)
+#define FILTER_HASH_MSK_PROTOCOL_FMASK GENMASK(5, 5)
+#define FILTER_HASH_MSK_METADATA_FMASK GENMASK(6, 6)
+#define IPA_REG_ENDP_FILTER_HASH_MSK_ALL GENMASK(6, 0)
+
+#define ROUTER_HASH_MSK_SRC_ID_FMASK GENMASK(16, 16)
+#define ROUTER_HASH_MSK_SRC_IP_FMASK GENMASK(17, 17)
+#define ROUTER_HASH_MSK_DST_IP_FMASK GENMASK(18, 18)
+#define ROUTER_HASH_MSK_SRC_PORT_FMASK GENMASK(19, 19)
+#define ROUTER_HASH_MSK_DST_PORT_FMASK GENMASK(20, 20)
+#define ROUTER_HASH_MSK_PROTOCOL_FMASK GENMASK(21, 21)
+#define ROUTER_HASH_MSK_METADATA_FMASK GENMASK(22, 22)
+#define IPA_REG_ENDP_ROUTER_HASH_MSK_ALL GENMASK(22, 16)
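+/* A sketch of how one of these mask groups is used: zero every
+ * filter-related field in the register while preserving the rest
+ * (offset is a placeholder for the register offset computed above):
+ *
+ *	u32 val = ioread32(ipa->reg_virt + offset);
+ *
+ *	u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
+ *	iowrite32(val, ipa->reg_virt + offset);
+ */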
+
+#define IPA_REG_IRQ_STTS_OFFSET \
+ IPA_REG_IRQ_STTS_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_STTS_EE_N_OFFSET(ee) \
+ (0x00003008 + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_EN_OFFSET \
+ IPA_REG_IRQ_EN_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_EN_EE_N_OFFSET(ee) \
+ (0x0000300c + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_CLR_OFFSET \
+ IPA_REG_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_CLR_EE_N_OFFSET(ee) \
+ (0x00003010 + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_UC_OFFSET \
+ IPA_REG_IRQ_UC_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_UC_EE_N_OFFSET(ee) \
+ (0x0000301c + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_SUSPEND_INFO_OFFSET \
+ IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(ee) \
+ (0x00003030 + 0x1000 * (ee))
+/* ipa->available defines the valid bits in the SUSPEND_INFO register */
+
+#define IPA_REG_SUSPEND_IRQ_EN_OFFSET \
+ IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(ee) \
+ (0x00003034 + 0x1000 * (ee))
+/* ipa->available defines the valid bits in the SUSPEND_IRQ_EN register */
+
+#define IPA_REG_SUSPEND_IRQ_CLR_OFFSET \
+ IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(ee) \
+ (0x00003038 + 0x1000 * (ee))
+/* ipa->available defines the valid bits in the SUSPEND_IRQ_CLR register */
+
+/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */
+enum ipa_cs_offload_en {
+ IPA_CS_OFFLOAD_NONE = 0,
+ IPA_CS_OFFLOAD_UL = 1,
+ IPA_CS_OFFLOAD_DL = 2,
+ IPA_CS_RSVD
+};
+
+/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_en {
+ IPA_BYPASS_AGGR = 0,
+ IPA_ENABLE_AGGR = 1,
+ IPA_ENABLE_DEAGGR = 2,
+};
+
+/** enum ipa_aggr_type - aggregation type field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_type {
+ IPA_MBIM_16 = 0,
+ IPA_HDLC = 1,
+ IPA_TLP = 2,
+ IPA_RNDIS = 3,
+ IPA_GENERIC = 4,
+ IPA_COALESCE = 5,
+ IPA_QCMAP = 6,
+};
+
+/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */
+enum ipa_mode {
+ IPA_BASIC = 0,
+ IPA_ENABLE_FRAMING_HDLC = 1,
+ IPA_ENABLE_DEFRAMING_HDLC = 2,
+ IPA_DMA = 3,
+};
+
+/**
+ * enum ipa_seq_type - HPS and DPS sequencer type fields in ENDP_INIT_SEQ_N
+ * @IPA_SEQ_DMA_ONLY: only DMA is performed
+ * @IPA_SEQ_PKT_PROCESS_NO_DEC_UCP:
+ * packet processing + no decipher + microcontroller (Ethernet Bridging)
+ * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP:
+ * second packet processing pass + no decipher + microcontroller
+ * @IPA_SEQ_DMA_DEC: DMA + cipher/decipher
+ * @IPA_SEQ_DMA_COMP_DECOMP: DMA + compression/decompression
+ * @IPA_SEQ_INVALID: invalid sequencer type
+ *
+ * The values defined here are broken into 4-bit nibbles that are written
+ * into fields of the INIT_SEQ_N endpoint registers.
+ */
+enum ipa_seq_type {
+ IPA_SEQ_DMA_ONLY = 0x0000,
+ IPA_SEQ_PKT_PROCESS_NO_DEC_UCP = 0x0002,
+ IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004,
+ IPA_SEQ_DMA_DEC = 0x0011,
+ IPA_SEQ_DMA_COMP_DECOMP = 0x0020,
+ IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806,
+ IPA_SEQ_INVALID = 0xffff,
+};
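+/* A sketch of how a sequencer type is split into the register fields,
+ * one nibble per field (e.g. IPA_SEQ_DMA_DEC, 0x0011, puts 0x1 in both
+ * the HPS and DPS fields):
+ *
+ *	val = u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
+ *	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
+ */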
+
+int ipa_reg_init(struct ipa *ipa);
+void ipa_reg_exit(struct ipa *ipa);
+
+#endif /* _IPA_REG_H_ */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
new file mode 100644
index 000000000000..4d33aa7ebfbb
--- /dev/null
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "ipa_smp2p.h"
+#include "ipa.h"
+#include "ipa_uc.h"
+#include "ipa_clock.h"
+
+/**
+ * DOC: IPA SMP2P communication with the modem
+ *
+ * SMP2P is a primitive communication mechanism available between the AP and
+ * the modem. The IPA driver uses it for two purposes: to allow the modem
+ * to signal that the GSI hardware is ready to use; and to communicate the
+ * state of the IPA clock in the event of a crash.
+ *
+ * GSI needs to have early initialization completed before it can be used.
+ * This initialization is done either by Trust Zone or by the modem. In the
+ * latter case, the modem uses an SMP2P interrupt to tell the AP IPA driver
+ * when the GSI is ready to use.
+ *
+ * The modem is also able to inquire about the current state of the IPA
+ * clock by triggering another SMP2P interrupt to the AP. We communicate
+ * whether the clock is enabled using two SMP2P state bits--one to
+ * indicate the clock state (on or off), and a second to indicate the
+ * clock state bit is valid. The modem will poll the valid bit until it
+ * is set, and at that time records whether the AP has the IPA clock enabled.
+ *
+ * Finally, if the AP kernel panics, we update the SMP2P state bits even if
+ * we never receive an interrupt from the modem requesting this.
+ */
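+/* A sketch of how a single SMP2P state bit is driven through the SMEM
+ * state API (state, bit and on are placeholder names; ipa_smp2p_notify()
+ * below does exactly this for the two bits):
+ *
+ *	u32 mask = BIT(bit);
+ *
+ *	qcom_smem_state_update_bits(state, mask, on ? mask : 0);
+ */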
+
+/**
+ * struct ipa_smp2p - IPA SMP2P information
+ * @ipa: IPA pointer
+ * @valid_state: SMEM state indicating enabled state is valid
+ * @enabled_state: SMEM state to indicate clock is enabled
+ * @valid_bit: Valid bit in 32-bit SMEM state mask
+ * @enabled_bit: Enabled bit in 32-bit SMEM state mask
+ * @clock_query_irq: IPA interrupt triggered by modem for clock query
+ * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready
+ * @clock_on: Whether IPA clock is on
+ * @notified: Whether modem has been notified of clock state
+ * @disabled: Whether setup ready interrupt handling is disabled
+ * @mutex: Mutex protecting the ready interrupt/shutdown interlock
+ * @panic_notifier: Panic notifier structure
+ */
+struct ipa_smp2p {
+ struct ipa *ipa;
+ struct qcom_smem_state *valid_state;
+ struct qcom_smem_state *enabled_state;
+ u32 valid_bit;
+ u32 enabled_bit;
+ u32 clock_query_irq;
+ u32 setup_ready_irq;
+ bool clock_on;
+ bool notified;
+ bool disabled;
+ struct mutex mutex;
+ struct notifier_block panic_notifier;
+};
+
+/**
+ * ipa_smp2p_notify() - use SMP2P to tell modem about IPA clock state
+ * @smp2p: SMP2P information
+ *
+ * This is called either when the modem has requested it (by triggering
+ * the modem clock query IPA interrupt) or whenever the AP is shutting down
+ * (via a panic notifier). It sets the two SMP2P state bits--one saying
+ * whether the IPA clock is running, and the other indicating the first bit
+ * is valid.
+ */
+static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
+{
+ u32 value;
+ u32 mask;
+
+ if (smp2p->notified)
+ return;
+
+ smp2p->clock_on = ipa_clock_get_additional(smp2p->ipa);
+
+ /* Signal whether the clock is enabled */
+ mask = BIT(smp2p->enabled_bit);
+ value = smp2p->clock_on ? mask : 0;
+ qcom_smem_state_update_bits(smp2p->enabled_state, mask, value);
+
+ /* Now indicate that the enabled flag is valid */
+ mask = BIT(smp2p->valid_bit);
+ value = mask;
+ qcom_smem_state_update_bits(smp2p->valid_state, mask, value);
+
+ smp2p->notified = true;
+}
+
+/* Threaded IRQ handler for modem "ipa-clock-query" SMP2P interrupt */
+static irqreturn_t ipa_smp2p_modem_clk_query_isr(int irq, void *dev_id)
+{
+ struct ipa_smp2p *smp2p = dev_id;
+
+ ipa_smp2p_notify(smp2p);
+
+ return IRQ_HANDLED;
+}
+
+static int ipa_smp2p_panic_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ipa_smp2p *smp2p;
+
+ smp2p = container_of(nb, struct ipa_smp2p, panic_notifier);
+
+ ipa_smp2p_notify(smp2p);
+
+ if (smp2p->clock_on)
+ ipa_uc_panic_notifier(smp2p->ipa);
+
+ return NOTIFY_DONE;
+}
+
+static int ipa_smp2p_panic_notifier_register(struct ipa_smp2p *smp2p)
+{
+ /* IPA panic handler needs to run before modem shuts down */
+ smp2p->panic_notifier.notifier_call = ipa_smp2p_panic_notifier;
+ smp2p->panic_notifier.priority = INT_MAX; /* Do it early */
+
+ return atomic_notifier_chain_register(&panic_notifier_list,
+ &smp2p->panic_notifier);
+}
+
+static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &smp2p->panic_notifier);
+}
+
+/* Threaded IRQ handler for modem "ipa-setup-ready" SMP2P interrupt */
+static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
+{
+ struct ipa_smp2p *smp2p = dev_id;
+
+ mutex_lock(&smp2p->mutex);
+
+ if (!smp2p->disabled) {
+ int ret;
+
+ ret = ipa_setup(smp2p->ipa);
+ if (ret)
+ dev_err(&smp2p->ipa->pdev->dev,
+ "error %d from ipa_setup()\n", ret);
+ smp2p->disabled = true;
+ }
+
+ mutex_unlock(&smp2p->mutex);
+
+ return IRQ_HANDLED;
+}
+
+/* Initialize SMP2P interrupts */
+static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
+ irq_handler_t handler)
+{
+ struct device *dev = &smp2p->ipa->pdev->dev;
+ unsigned int irq;
+ int ret;
+
+ ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
+ if (ret <= 0) {
+ dev_err(dev, "DT error %d getting \"%s\" IRQ property\n",
+ ret, name);
+ return ret ? : -EINVAL;
+ }
+ irq = ret;
+
+ ret = request_threaded_irq(irq, NULL, handler, 0, name, smp2p);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"%s\" IRQ\n", ret, name);
+ return ret;
+ }
+
+ return irq;
+}
+
+static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq)
+{
+ free_irq(irq, smp2p);
+}
+
+/* Drop the clock reference if it was taken in ipa_smp2p_notify() */
+static void ipa_smp2p_clock_release(struct ipa *ipa)
+{
+ if (!ipa->smp2p->clock_on)
+ return;
+
+ ipa_clock_put(ipa);
+ ipa->smp2p->clock_on = false;
+}
+
+/* Initialize the IPA SMP2P subsystem */
+int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
+{
+ struct qcom_smem_state *enabled_state;
+ struct device *dev = &ipa->pdev->dev;
+ struct qcom_smem_state *valid_state;
+ struct ipa_smp2p *smp2p;
+ u32 enabled_bit;
+ u32 valid_bit;
+ int ret;
+
+ valid_state = qcom_smem_state_get(dev, "ipa-clock-enabled-valid",
+ &valid_bit);
+ if (IS_ERR(valid_state))
+ return PTR_ERR(valid_state);
+ if (valid_bit >= 32) /* BITS_PER_U32 */
+ return -EINVAL;
+
+ enabled_state = qcom_smem_state_get(dev, "ipa-clock-enabled",
+ &enabled_bit);
+ if (IS_ERR(enabled_state))
+ return PTR_ERR(enabled_state);
+ if (enabled_bit >= 32) /* BITS_PER_U32 */
+ return -EINVAL;
+
+ smp2p = kzalloc(sizeof(*smp2p), GFP_KERNEL);
+ if (!smp2p)
+ return -ENOMEM;
+
+ smp2p->ipa = ipa;
+
+ /* These fields are needed by the clock query interrupt
+ * handler, so initialize them now.
+ */
+ mutex_init(&smp2p->mutex);
+ smp2p->valid_state = valid_state;
+ smp2p->valid_bit = valid_bit;
+ smp2p->enabled_state = enabled_state;
+ smp2p->enabled_bit = enabled_bit;
+
+ /* We have enough information saved to handle notifications */
+ ipa->smp2p = smp2p;
+
+ ret = ipa_smp2p_irq_init(smp2p, "ipa-clock-query",
+ ipa_smp2p_modem_clk_query_isr);
+ if (ret < 0)
+ goto err_null_smp2p;
+ smp2p->clock_query_irq = ret;
+
+ ret = ipa_smp2p_panic_notifier_register(smp2p);
+ if (ret)
+ goto err_irq_exit;
+
+ if (modem_init) {
+ /* Result will be non-zero (negative for error) */
+ ret = ipa_smp2p_irq_init(smp2p, "ipa-setup-ready",
+ ipa_smp2p_modem_setup_ready_isr);
+ if (ret < 0)
+ goto err_notifier_unregister;
+ smp2p->setup_ready_irq = ret;
+ }
+
+ return 0;
+
+err_notifier_unregister:
+ ipa_smp2p_panic_notifier_unregister(smp2p);
+err_irq_exit:
+ ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq);
+err_null_smp2p:
+ ipa->smp2p = NULL;
+ mutex_destroy(&smp2p->mutex);
+ kfree(smp2p);
+
+ return ret;
+}
+
+void ipa_smp2p_exit(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+
+ if (smp2p->setup_ready_irq)
+ ipa_smp2p_irq_exit(smp2p, smp2p->setup_ready_irq);
+ ipa_smp2p_panic_notifier_unregister(smp2p);
+ ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq);
+ /* We won't get notified any more; drop clock reference (if any) */
+ ipa_smp2p_clock_release(ipa);
+ ipa->smp2p = NULL;
+ mutex_destroy(&smp2p->mutex);
+ kfree(smp2p);
+}
+
+void ipa_smp2p_disable(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+
+ if (!smp2p->setup_ready_irq)
+ return;
+
+ mutex_lock(&smp2p->mutex);
+
+ smp2p->disabled = true;
+
+ mutex_unlock(&smp2p->mutex);
+}
+
+/* Reset state tracking whether we have notified the modem */
+void ipa_smp2p_notify_reset(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+ u32 mask;
+
+ if (!smp2p->notified)
+ return;
+
+ ipa_smp2p_clock_release(ipa);
+
+ /* Reset the clock enabled valid flag */
+ mask = BIT(smp2p->valid_bit);
+ qcom_smem_state_update_bits(smp2p->valid_state, mask, 0);
+
+ /* Mark the clock disabled for good measure... */
+ mask = BIT(smp2p->enabled_bit);
+ qcom_smem_state_update_bits(smp2p->enabled_state, mask, 0);
+
+ smp2p->notified = false;
+}
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
new file mode 100644
index 000000000000..1f65cdc9d406
--- /dev/null
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_SMP2P_H_
+#define _IPA_SMP2P_H_
+
+#include <linux/types.h>
+
+struct ipa;
+
+/**
+ * ipa_smp2p_init() - Initialize the IPA SMP2P subsystem
+ * @ipa: IPA pointer
+ * @modem_init: Whether the modem is responsible for GSI initialization
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
+
+/**
+ * ipa_smp2p_exit() - Inverse of ipa_smp2p_init()
+ * @ipa: IPA pointer
+ */
+void ipa_smp2p_exit(struct ipa *ipa);
+
+/**
+ * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling
+ * @ipa: IPA pointer
+ *
+ * Prevent handling of the "setup ready" interrupt from the modem.
+ * This is used before initiating shutdown of the driver.
+ */
+void ipa_smp2p_disable(struct ipa *ipa);
+
+/**
+ * ipa_smp2p_notify_reset() - Reset modem notification state
+ * @ipa: IPA pointer
+ *
+ * If the modem crashes it queries the IPA clock state. In cleaning
+ * up after such a crash this is used to reset some state maintained
+ * for managing this notification.
+ */
+void ipa_smp2p_notify_reset(struct ipa *ipa);
+
+#endif /* _IPA_SMP2P_H_ */
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
new file mode 100644
index 000000000000..9df2a3e78c98
--- /dev/null
+++ b/drivers/net/ipa/ipa_table.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bits.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/build_bug.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include "ipa.h"
+#include "ipa_version.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_reg.h"
+#include "ipa_mem.h"
+#include "ipa_cmd.h"
+#include "gsi.h"
+#include "gsi_trans.h"
+
+/**
+ * DOC: IPA Filter and Route Tables
+ *
+ * The IPA has tables defined in its local shared memory that define filter
+ * and routing rules. Each entry in these tables contains a 64-bit DMA
+ * address that refers to DRAM (system memory) containing a rule definition.
+ * A rule consists of a contiguous block of 32-bit values terminated with
+ * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits
+ * represents "no filtering" or "no routing," and is the reset value for
+ * filter or route table rules. Separate tables (both filter and route)
+ * used for IPv4 and IPv6. Additionally, there can be hashed filter or
+ * route tables, which are used when a hash of message metadata matches.
+ * Hashed operation is not supported by all IPA hardware.
+ *
+ * Each filter rule is associated with an AP or modem TX endpoint, though
+ * not all TX endpoints support filtering. The first 64-bit entry in a
+ * filter table is a bitmap indicating which endpoints have entries in
+ * the table. The low-order bit (bit 0) in this bitmap represents a
+ * special global filter, which applies to all traffic. This is not
+ * used in the current code. Bit 1, if set, indicates that there is an
+ * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the
+ * table. Bit 2, if set, indicates there is an entry for endpoint 1,
+ * and so on. Space is set aside in IPA local memory to hold as many
+ * filter table entries as might be required, but typically they are not
+ * all used.
+ *
+ * The AP initializes all entries in a filter table to refer to a "zero"
+ * entry. Once initialized the modem and AP update the entries for
+ * endpoints they "own" directly. Currently the AP does not use the
+ * IPA filtering functionality.
+ *
+ * IPA Filter Table
+ * ----------------------
+ * endpoint bitmap | 0x0000000000000048 | Bits 3 and 6 set (endpoints 2 and 5)
+ * |--------------------|
+ * 1st endpoint | 0x000123456789abc0 | DMA address for modem endpoint 2 rule
+ * |--------------------|
+ * 2nd endpoint | 0x000123456789abf0 | DMA address for AP endpoint 5 rule
+ * |--------------------|
+ * (unused) | | (Unused space in filter table)
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * (unused) | | (Unused space in filter table)
+ * ----------------------
+ *
+ * The set of available route rules is divided about equally between the AP
+ * and modem. The AP initializes all entries in a route table to refer to
+ * a "zero entry". Once initialized, the modem and AP are responsible for
+ * updating their own entries. All entries in a route table are usable,
+ * though the AP currently does not use the IPA routing functionality.
+ *
+ * IPA Route Table
+ * ----------------------
+ * 1st modem route | 0x0001234500001100 | DMA address for first route rule
+ * |--------------------|
+ * 2nd modem route | 0x0001234500001140 | DMA address for second route rule
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * Last modem route| 0x0001234500002280 | DMA address for Nth route rule
+ * |--------------------|
+ * 1st AP route | 0x0001234500001100 | DMA address for route rule (N+1)
+ * |--------------------|
+ * 2nd AP route | 0x0001234500001140 | DMA address for next route rule
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * Last AP route | 0x0001234500002280 | DMA address for last route rule
+ * ----------------------
+ */
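+/* Worked example for the filter bitmap above: endpoints 2 and 5 have
+ * entries, and bit 0 is the (unused) global filter, so the value stored
+ * is (BIT(2) | BIT(5)) << 1 == BIT(3) | BIT(6) == 0x48.
+ */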
+
+/* IPA hardware constrains filter and route tables alignment */
+#define IPA_TABLE_ALIGN 128 /* Minimum table alignment */
+
+/* Assignment of route table entries to the modem and AP */
+#define IPA_ROUTE_MODEM_MIN 0
+#define IPA_ROUTE_MODEM_COUNT 8
+
+#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
+#define IPA_ROUTE_AP_COUNT \
+ (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
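+/* With IPA_ROUTE_COUNT_MAX == 15 this assigns route table entries
+ * 0..7 to the modem and entries 8..14 to the AP.
+ */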
+
+/* Filter or route rules consist of a set of 32-bit values followed by a
+ * 32-bit all-zero rule list terminator. The "zero rule" is simply an
+ * all-zero rule followed by the list terminator.
+ */
+#define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32))
+
+#ifdef IPA_VALIDATE
+
+/* Check things that can be validated at build time. */
+static void ipa_table_validate_build(void)
+{
+ /* IPA hardware accesses memory 128 bytes at a time. Addresses
+ * referred to by entries in filter and route tables must be
+ * aligned on 128-byte boundaries. The only rule address
+ * ever used is the "zero rule", and it's aligned at the base
+ * of a coherent DMA allocation.
+ */
+ BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN);
+
+ /* Filter and route tables contain DMA addresses that refer to
+ * filter or route rules. We use a fixed constant to represent
+ * the size of either type of table entry. Code in ipa_table_init()
+ * uses a pointer to __le64 to initialize table entries.
+ */
+ BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t));
+ BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64));
+
+ /* A "zero rule" is used to represent no filtering or no routing.
+ * It is a 64-bit block of zeroed memory. Code in ipa_table_init()
+ * assumes that it can be written using a pointer to __le64.
+ */
+ BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));
+
+ /* Impose a practical limit on the number of routes */
+ BUILD_BUG_ON(IPA_ROUTE_COUNT_MAX > 32);
+ /* The modem must be allotted at least one route table entry */
+ BUILD_BUG_ON(!IPA_ROUTE_MODEM_COUNT);
+ /* But it can't have more than what is available */
+ BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT > IPA_ROUTE_COUNT_MAX);
+}
+
+static bool
+ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
+{
+ struct device *dev = &ipa->pdev->dev;
+ const struct ipa_mem *mem;
+ u32 size;
+
+ if (route) {
+ if (ipv6)
+ mem = hashed ? &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]
+ : &ipa->mem[IPA_MEM_V6_ROUTE];
+ else
+ mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
+ : &ipa->mem[IPA_MEM_V4_ROUTE];
+ size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE;
+ } else {
+ if (ipv6)
+ mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
+ : &ipa->mem[IPA_MEM_V6_FILTER];
+ else
+ mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
+ : &ipa->mem[IPA_MEM_V4_FILTER];
+ size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE;
+ }
+
+ if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
+ return false;
+
+ /* mem->size >= size is sufficient, but we'll demand more */
+ if (mem->size == size)
+ return true;
+
+ /* Hashed table regions can be zero size if hashing is not supported */
+ if (hashed && !mem->size)
+ return true;
+
+ dev_err(dev, "IPv%c %s%s table region size 0x%02x, expected 0x%02x\n",
+ ipv6 ? '6' : '4', hashed ? "hashed " : "",
+ route ? "route" : "filter", mem->size, size);
+
+ return false;
+}
+
+/* Verify the filter and route table memory regions are the expected size */
+bool ipa_table_valid(struct ipa *ipa)
+{
+ bool valid = true;
+
+ valid = valid && ipa_table_valid_one(ipa, false, false, false);
+ valid = valid && ipa_table_valid_one(ipa, false, false, true);
+ valid = valid && ipa_table_valid_one(ipa, false, true, false);
+ valid = valid && ipa_table_valid_one(ipa, false, true, true);
+ valid = valid && ipa_table_valid_one(ipa, true, false, false);
+ valid = valid && ipa_table_valid_one(ipa, true, false, true);
+ valid = valid && ipa_table_valid_one(ipa, true, true, false);
+ valid = valid && ipa_table_valid_one(ipa, true, true, true);
+
+ return valid;
+}
+
+bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 count;
+
+ if (!filter_map) {
+ dev_err(dev, "at least one filtering endpoint is required\n");
+
+ return false;
+ }
+
+ count = hweight32(filter_map);
+ if (count > IPA_FILTER_COUNT_MAX) {
+ dev_err(dev, "too many filtering endpoints (%u, max %u)\n",
+ count, IPA_FILTER_COUNT_MAX);
+
+ return false;
+ }
+
+ return true;
+}
+
+#else /* !IPA_VALIDATE */
+static void ipa_table_validate_build(void)
+{
+}
+
+#endif /* !IPA_VALIDATE */
+
+/* Zero entry count means no table, so just return a 0 address */
+static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
+{
+ u32 skip;
+
+ if (!count)
+ return 0;
+
+ /* assert(count <= max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); */
+
+ /* Skip over the zero rule. A filter table begins with its endpoint
+ * bitmap (the slot right after the zero rule); a route table begins
+ * with its first rule address, so skip the bitmap slot as well.
+ */
+ skip = filter_mask ? 1 : 2;
+
+ return ipa->table_addr + skip * sizeof(*ipa->table_virt);
+}
+
+static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
+ u16 first, u16 count, const struct ipa_mem *mem)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ dma_addr_t addr;
+ u32 offset;
+ u16 size;
+
+ /* Nothing to do if the table memory region is empty */
+ if (!mem->size)
+ return;
+
+ if (filter)
+ first++; /* skip over bitmap */
+
+ offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE;
+ size = count * IPA_TABLE_ENTRY_SIZE;
+ addr = ipa_table_addr(ipa, false, count);
+
+ ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
+}
+
+/* Reset entries in a single filter table belonging to either the AP or
+ * modem to refer to the zero entry. The memory region supplied will be
+ * for the IPv4 and IPv6 non-hashed and hashed filter tables.
+ */
+static int
+ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
+{
+ u32 ep_mask = ipa->filter_map;
+ u32 count = hweight32(ep_mask);
+ struct gsi_trans *trans;
+ enum gsi_ee_id ee_id;
+
+ if (!mem->size)
+ return 0;
+
+ trans = ipa_cmd_trans_alloc(ipa, count);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction for %s filter reset\n",
+ modem ? "modem" : "AP");
+ return -EBUSY;
+ }
+
+ ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
+ while (ep_mask) {
+ u32 endpoint_id = __ffs(ep_mask);
+ struct ipa_endpoint *endpoint;
+
+ ep_mask ^= BIT(endpoint_id);
+
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (endpoint->ee_id != ee_id)
+ continue;
+
+ ipa_table_reset_add(trans, true, endpoint_id, 1, mem);
+ }
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+/* Theoretically, each filter table could have more filter slots to
+ * update than the maximum number of commands in a transaction. So
+ * we do each table separately.
+ */
+static int ipa_filter_reset(struct ipa *ipa, bool modem)
+{
+ int ret;
+
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER], modem);
+ if (ret)
+ return ret;
+
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER_HASHED],
+ modem);
+ if (ret)
+ return ret;
+
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER], modem);
+ if (ret)
+ return ret;
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER_HASHED],
+ modem);
+
+ return ret;
+}
+
+/* The AP routes and modem routes are each contiguous within the
+ * table. We can update each table with a single command, and we
+ * won't exceed the per-transaction command limit.
+ */
+static int ipa_route_reset(struct ipa *ipa, bool modem)
+{
+ struct gsi_trans *trans;
+ u16 first;
+ u16 count;
+
+ trans = ipa_cmd_trans_alloc(ipa, 4);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction for %s route reset\n",
+ modem ? "modem" : "AP");
+ return -EBUSY;
+ }
+
+ if (modem) {
+ first = IPA_ROUTE_MODEM_MIN;
+ count = IPA_ROUTE_MODEM_COUNT;
+ } else {
+ first = IPA_ROUTE_AP_MIN;
+ count = IPA_ROUTE_AP_COUNT;
+ }
+
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V4_ROUTE]);
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V6_ROUTE]);
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+void ipa_table_reset(struct ipa *ipa, bool modem)
+{
+ struct device *dev = &ipa->pdev->dev;
+ const char *ee_name;
+ int ret;
+
+ ee_name = modem ? "modem" : "AP";
+
+ /* Report errors, but reset filter and route tables */
+ ret = ipa_filter_reset(ipa, modem);
+ if (ret)
+ dev_err(dev, "error %d resetting filter table for %s\n",
+ ret, ee_name);
+
+ ret = ipa_route_reset(ipa, modem);
+ if (ret)
+ dev_err(dev, "error %d resetting route table for %s\n",
+ ret, ee_name);
+}
+
+int ipa_table_hash_flush(struct ipa *ipa)
+{
+ u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ struct gsi_trans *trans;
+ u32 val;
+
+ /* IPA version 4.2 does not support hashed tables */
+ if (ipa->version == IPA_VERSION_4_2)
+ return 0;
+
+ trans = ipa_cmd_trans_alloc(ipa, 1);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
+ return -EBUSY;
+ }
+
+ val = IPV4_FILTER_HASH_FLUSH | IPV6_FILTER_HASH_FLUSH;
+ val |= IPV6_ROUTER_HASH_FLUSH | IPV4_ROUTER_HASH_FLUSH;
+
+ ipa_cmd_register_write_add(trans, offset, val, val, false);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
+ enum ipa_cmd_opcode opcode,
+ const struct ipa_mem *mem,
+ const struct ipa_mem *hash_mem)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ dma_addr_t hash_addr;
+ dma_addr_t addr;
+ u16 hash_count;
+ u16 hash_size;
+ u16 count;
+ u16 size;
+
+ /* The number of filtering endpoints determines the number of entries
+ * in the filter table. The hashed and non-hashed filter table
+ * will have the same number of entries. The size of the route
+ * table region determines the number of entries it has.
+ */
+ if (filter) {
+ count = hweight32(ipa->filter_map);
+ hash_count = hash_mem->size ? count : 0;
+ } else {
+ count = mem->size / IPA_TABLE_ENTRY_SIZE;
+ hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE;
+ }
+ size = count * IPA_TABLE_ENTRY_SIZE;
+ hash_size = hash_count * IPA_TABLE_ENTRY_SIZE;
+
+ addr = ipa_table_addr(ipa, filter, count);
+ hash_addr = ipa_table_addr(ipa, filter, hash_count);
+
+ ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
+ hash_size, hash_mem->offset, hash_addr);
+}
+
+int ipa_table_setup(struct ipa *ipa)
+{
+ struct gsi_trans *trans;
+
+ trans = ipa_cmd_trans_alloc(ipa, 4);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
+ return -EBUSY;
+ }
+
+ ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
+ &ipa->mem[IPA_MEM_V4_ROUTE],
+ &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+
+ ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
+ &ipa->mem[IPA_MEM_V6_ROUTE],
+ &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+
+ ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
+ &ipa->mem[IPA_MEM_V4_FILTER],
+ &ipa->mem[IPA_MEM_V4_FILTER_HASHED]);
+
+ ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
+ &ipa->mem[IPA_MEM_V6_FILTER],
+ &ipa->mem[IPA_MEM_V6_FILTER_HASHED]);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+void ipa_table_teardown(struct ipa *ipa)
+{
+ /* Nothing to do (XXX maybe reset the tables?) */
+}
+
+/**
+ * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
+ * @endpoint: Endpoint whose filter hash tuple should be zeroed
+ *
+ * Endpoint must be for the AP (not modem) and support filtering. Updates
+ * the filter hash values without changing route ones.
+ */
+static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 offset;
+ u32 val;
+
+ offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(endpoint_id);
+
+ val = ioread32(endpoint->ipa->reg_virt + offset);
+
+ /* Zero all filter-related fields, preserving the rest */
+ u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_filter_config(struct ipa *ipa, bool modem)
+{
+ enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
+ u32 ep_mask = ipa->filter_map;
+
+ /* IPA version 4.2 does not support hashed tables */
+ if (ipa->version == IPA_VERSION_4_2)
+ return;
+
+ while (ep_mask) {
+ u32 endpoint_id = __ffs(ep_mask);
+ struct ipa_endpoint *endpoint;
+
+ ep_mask ^= BIT(endpoint_id);
+
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (endpoint->ee_id == ee_id)
+ ipa_filter_tuple_zero(endpoint);
+ }
+}
+
+static void ipa_filter_deconfig(struct ipa *ipa, bool modem)
+{
+ /* Nothing to do */
+}
+
+static bool ipa_route_id_modem(u32 route_id)
+{
+ return route_id >= IPA_ROUTE_MODEM_MIN &&
+ route_id <= IPA_ROUTE_MODEM_MIN + IPA_ROUTE_MODEM_COUNT - 1;
+}
+
+/**
+ * ipa_route_tuple_zero() - Zero a hashed route table entry tuple
+ * @ipa: IPA pointer
+ * @route_id: Route table entry whose hash tuple should be zeroed
+ *
+ * Updates the route hash values without changing filter ones.
+ */
+static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
+{
+ u32 offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(route_id);
+ u32 val;
+
+ val = ioread32(ipa->reg_virt + offset);
+
+ /* Zero all route-related fields, preserving the rest */
+ u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static void ipa_route_config(struct ipa *ipa, bool modem)
+{
+ u32 route_id;
+
+ /* IPA version 4.2 has no hashed route tables */
+ if (ipa->version == IPA_VERSION_4_2)
+ return;
+
+ for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++)
+ if (ipa_route_id_modem(route_id) == modem)
+ ipa_route_tuple_zero(ipa, route_id);
+}
+
+static void ipa_route_deconfig(struct ipa *ipa, bool modem)
+{
+ /* Nothing to do */
+}
+
+void ipa_table_config(struct ipa *ipa)
+{
+ ipa_filter_config(ipa, false);
+ ipa_filter_config(ipa, true);
+ ipa_route_config(ipa, false);
+ ipa_route_config(ipa, true);
+}
+
+void ipa_table_deconfig(struct ipa *ipa)
+{
+ ipa_route_deconfig(ipa, true);
+ ipa_route_deconfig(ipa, false);
+ ipa_filter_deconfig(ipa, true);
+ ipa_filter_deconfig(ipa, false);
+}
+
+/*
+ * Initialize a coherent DMA allocation containing initialized filter and
+ * route table data. This is used when initializing or resetting the IPA
+ * filter or route table.
+ *
+ * The first entry in a filter table contains a bitmap indicating which
+ * endpoints contain entries in the table. In addition to that first entry,
+ * there are at most IPA_FILTER_COUNT_MAX entries that follow. Filter table
+ * entries are 64 bits wide, and (other than the bitmap) contain the DMA
+ * address of a filter rule. A "zero rule" indicates no filtering, and
+ * consists of 64 bits of zeroes. When a filter table is initialized (or
+ * reset) its entries are made to refer to the zero rule.
+ *
+ * Each entry in a route table is the DMA address of a routing rule. For
+ * routing there is also a 64-bit "zero rule" that means no routing, and
+ * when a route table is initialized or reset, its entries are made to refer
+ * to the zero rule. The zero rule is shared for route and filter tables.
+ *
+ * Note that the IPA hardware requires a filter or route rule address to be
+ * aligned on a 128 byte boundary. The coherent DMA buffer we allocate here
+ * has a minimum alignment, and we place the zero rule at the base of that
+ * allocated space. In ipa_table_init() we verify the minimum DMA allocation
+ * meets our requirement.
+ *
+ * +-------------------+
+ * --> | zero rule |
+ * / |-------------------|
+ * | | filter mask |
+ * |\ |-------------------|
+ * | ---- zero rule address | \
+ * |\ |-------------------| |
+ * | ---- zero rule address | | IPA_FILTER_COUNT_MAX
+ * | |-------------------| > or IPA_ROUTE_COUNT_MAX,
+ * | ... | whichever is greater
+ * \ |-------------------| |
+ * ---- zero rule address | /
+ * +-------------------+
+ */
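+/* With the current maxima the allocation below works out to
+ * IPA_ZERO_RULE_SIZE + (1 + max(14, 15)) * IPA_TABLE_ENTRY_SIZE,
+ * or 8 + 16 * 8 == 136 bytes.
+ */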
+int ipa_table_init(struct ipa *ipa)
+{
+ u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
+ struct device *dev = &ipa->pdev->dev;
+ dma_addr_t addr;
+ __le64 le_addr;
+ __le64 *virt;
+ size_t size;
+
+ ipa_table_validate_build();
+
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
+ virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ ipa->table_virt = virt;
+ ipa->table_addr = addr;
+
+ /* First slot is the zero rule */
+ *virt++ = 0;
+
+ /* Next is the filter table bitmap. The "soft" bitmap value
+ * must be converted to the hardware representation by shifting
+ * it left one position. (Bit 0 represents global filtering,
+ * which is possible but not used.)
+ */
+ *virt++ = cpu_to_le64((u64)ipa->filter_map << 1);
+
+ /* All the rest contain the DMA address of the zero rule */
+ le_addr = cpu_to_le64(addr);
+ while (count--)
+ *virt++ = le_addr;
+
+ return 0;
+}
+
+void ipa_table_exit(struct ipa *ipa)
+{
+ u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
+ struct device *dev = &ipa->pdev->dev;
+ size_t size;
+
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
+
+ dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
+ ipa->table_addr = 0;
+ ipa->table_virt = NULL;
+}
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
new file mode 100644
index 000000000000..64ea0221441a
--- /dev/null
+++ b/drivers/net/ipa/ipa_table.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_TABLE_H_
+#define _IPA_TABLE_H_
+
+#include <linux/types.h>
+
+struct ipa;
+
+/* The size of a filter or route table entry */
+#define IPA_TABLE_ENTRY_SIZE sizeof(__le64) /* Holds a physical address */
+
+/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
+#define IPA_FILTER_COUNT_MAX 14
+
+/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
+#define IPA_ROUTE_COUNT_MAX 15
+
+#ifdef IPA_VALIDATE
+
+/**
+ * ipa_table_valid() - Validate route and filter table memory regions
+ * @ipa: IPA pointer
+ *
+ * @Return: true if all regions are valid, false otherwise
+ */
+bool ipa_table_valid(struct ipa *ipa);
+
+/**
+ * ipa_filter_map_valid() - Validate a filter table endpoint bitmap
+ * @ipa: IPA pointer
+ * @filter_map: Bitmap of endpoints with filter table entries
+ *
+ * @Return: true if the filter map is valid, false otherwise
+ */
+bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map);
+
+#else /* !IPA_VALIDATE */
+
+static inline bool ipa_table_valid(struct ipa *ipa)
+{
+ return true;
+}
+
+static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+/**
+ * ipa_table_reset() - Reset filter and route tables entries to "none"
+ * @ipa: IPA pointer
+ * @modem: Whether to reset modem or AP entries
+ */
+void ipa_table_reset(struct ipa *ipa, bool modem);
+
+/**
+ * ipa_table_hash_flush() - Synchronize hashed filter and route updates
+ * @ipa: IPA pointer
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int ipa_table_hash_flush(struct ipa *ipa);
+
+/**
+ * ipa_table_setup() - Set up filter and route tables
+ * @ipa: IPA pointer
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int ipa_table_setup(struct ipa *ipa);
+
+/**
+ * ipa_table_teardown() - Inverse of ipa_table_setup()
+ * @ipa: IPA pointer
+ */
+void ipa_table_teardown(struct ipa *ipa);
+
+/**
+ * ipa_table_config() - Configure filter and route tables
+ * @ipa: IPA pointer
+ */
+void ipa_table_config(struct ipa *ipa);
+
+/**
+ * ipa_table_deconfig() - Inverse of ipa_table_config()
+ * @ipa: IPA pointer
+ */
+void ipa_table_deconfig(struct ipa *ipa);
+
+/**
+ * ipa_table_init() - Do early initialization of filter and route tables
+ * @ipa: IPA pointer
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int ipa_table_init(struct ipa *ipa);
+
+/**
+ * ipa_table_exit() - Inverse of ipa_table_init()
+ * @ipa: IPA pointer
+ */
+void ipa_table_exit(struct ipa *ipa);
+
+#endif /* _IPA_TABLE_H_ */
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
new file mode 100644
index 000000000000..a1f8db00d55a
--- /dev/null
+++ b/drivers/net/ipa/ipa_uc.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_uc.h"
+
+/**
+ * DOC: The IPA embedded microcontroller
+ *
+ * The IPA incorporates a microcontroller that is able to do some additional
+ * handling/offloading of network activity. The current code makes
+ * essentially no use of the microcontroller, but it still requires some
+ * initialization. It needs to be notified in the event the AP crashes.
+ *
+ * The microcontroller can generate two interrupts to the AP. One interrupt
+ * is used to indicate that a response to a request from the AP is available.
+ * The other is used to notify the AP of the occurrence of an event. In
+ * addition, the AP can interrupt the microcontroller by writing a register.
+ *
+ * A 128 byte block of structured memory within the IPA SRAM is used together
+ * with these interrupts to implement the communication interface between the
+ * AP and the IPA microcontroller. Each side writes data to the shared area
+ * before interrupting its peer, which will read the written data in response
+ * to the interrupt. Some information found in the shared area is currently
+ * unused. All remaining space in the shared area is reserved, and must not
+ * be read or written by the AP.
+ */
+/* Supports hardware interface version 0x2000 */
+
+/* Offset relative to the base of the IPA shared address space of the
+ * shared region used for communication with the microcontroller. The
+ * region is 128 bytes in size, but only the first 40 bytes are used.
+ */
+#define IPA_MEM_UC_OFFSET 0x0000
+
+/* Delay to allow the microcontroller to save state when crashing */
+#define IPA_SEND_DELAY 100 /* microseconds */
+
+/**
+ * struct ipa_uc_mem_area - AP/microcontroller shared memory area
+ * @command: command code (AP->microcontroller)
+ * @command_param: low 32 bits of command parameter (AP->microcontroller)
+ * @command_param_hi: high 32 bits of command parameter (AP->microcontroller)
+ *
+ * @response: response code (microcontroller->AP)
+ * @response_param: response parameter (microcontroller->AP)
+ *
+ * @event: event code (microcontroller->AP)
+ * @event_param: event parameter (microcontroller->AP)
+ *
+ * @first_error_address: address of first error-source on SNOC
+ * @hw_state: state of hardware (including error type information)
+ * @warning_counter: counter of non-fatal hardware errors
+ * @interface_version: hardware-reported interface version
+ */
+struct ipa_uc_mem_area {
+ u8 command; /* enum ipa_uc_command */
+ u8 reserved0[3];
+ __le32 command_param;
+ __le32 command_param_hi;
+ u8 response; /* enum ipa_uc_response */
+ u8 reserved1[3];
+ __le32 response_param;
+ u8 event; /* enum ipa_uc_event */
+ u8 reserved2[3];
+
+ __le32 event_param;
+ __le32 first_error_address;
+ u8 hw_state;
+ u8 warning_counter;
+ __le16 reserved3;
+ __le16 interface_version;
+ __le16 reserved4;
+};
+
+/** enum ipa_uc_command - commands from the AP to the microcontroller */
+enum ipa_uc_command {
+ IPA_UC_COMMAND_NO_OP = 0,
+ IPA_UC_COMMAND_UPDATE_FLAGS = 1,
+ IPA_UC_COMMAND_DEBUG_RUN_TEST = 2,
+ IPA_UC_COMMAND_DEBUG_GET_INFO = 3,
+ IPA_UC_COMMAND_ERR_FATAL = 4,
+ IPA_UC_COMMAND_CLK_GATE = 5,
+ IPA_UC_COMMAND_CLK_UNGATE = 6,
+ IPA_UC_COMMAND_MEMCPY = 7,
+ IPA_UC_COMMAND_RESET_PIPE = 8,
+ IPA_UC_COMMAND_REG_WRITE = 9,
+ IPA_UC_COMMAND_GSI_CH_EMPTY = 10,
+};
+
+/** enum ipa_uc_response - microcontroller response codes */
+enum ipa_uc_response {
+ IPA_UC_RESPONSE_NO_OP = 0,
+ IPA_UC_RESPONSE_INIT_COMPLETED = 1,
+ IPA_UC_RESPONSE_CMD_COMPLETED = 2,
+ IPA_UC_RESPONSE_DEBUG_GET_INFO = 3,
+};
+
+/** enum ipa_uc_event - common CPU events reported by the microcontroller */
+enum ipa_uc_event {
+ IPA_UC_EVENT_NO_OP = 0,
+ IPA_UC_EVENT_ERROR = 1,
+ IPA_UC_EVENT_LOG_INFO = 2,
+};
+
+static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
+{
+ u32 offset = ipa->mem_offset + ipa->mem[IPA_MEM_UC_SHARED].offset;
+
+ return ipa->mem_virt + offset;
+}
+
+/* Microcontroller event IPA interrupt handler */
+static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ struct device *dev = &ipa->pdev->dev;
+
+ if (shared->event == IPA_UC_EVENT_ERROR)
+ dev_err(dev, "microcontroller error event\n");
+ else
+ dev_err(dev, "unsupported microcontroller event %hhu\n",
+ shared->event);
+}
+
+/* Microcontroller response IPA interrupt handler */
+static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+
+ /* An INIT_COMPLETED response message is sent to the AP by the
+ * microcontroller when it is operational. Other than this, the AP
+ * should only receive responses from the microcontroller when it has
+ * sent it a request message.
+ *
+ * We can drop the clock reference taken in ipa_uc_init() once we
+ * know the microcontroller has finished its initialization.
+ */
+ switch (shared->response) {
+ case IPA_UC_RESPONSE_INIT_COMPLETED:
+ ipa->uc_loaded = true;
+ ipa_clock_put(ipa);
+ break;
+ default:
+ dev_warn(&ipa->pdev->dev,
+ "unsupported microcontroller response %hhu\n",
+ shared->response);
+ break;
+ }
+}
+
+/* ipa_uc_setup() - Set up the microcontroller */
+void ipa_uc_setup(struct ipa *ipa)
+{
+ /* The microcontroller needs the IPA clock running until it has
+ * completed its initialization. It signals this by sending an
+ * INIT_COMPLETED response message to the AP. This could occur after
+ * we have finished doing the rest of the IPA initialization, so we
+ * need to take an extra "proxy" reference, and hold it until we've
+ * received that signal. (This reference is dropped in
+ * ipa_uc_response_hdlr(), above.)
+ */
+ ipa_clock_get(ipa);
+
+ ipa->uc_loaded = false;
+ ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, ipa_uc_event_handler);
+ ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_1, ipa_uc_response_hdlr);
+}
+
+/* Inverse of ipa_uc_setup() */
+void ipa_uc_teardown(struct ipa *ipa)
+{
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ if (!ipa->uc_loaded)
+ ipa_clock_put(ipa);
+}
+
+/* Send a command to the microcontroller */
+static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+
+ shared->command = command;
+ shared->command_param = cpu_to_le32(command_param);
+ shared->command_param_hi = 0;
+ shared->response = 0;
+ shared->response_param = 0;
+
+ iowrite32(1, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET);
+}
+
+/* Tell the microcontroller the AP is shutting down */
+void ipa_uc_panic_notifier(struct ipa *ipa)
+{
+ if (!ipa->uc_loaded)
+ return;
+
+ send_uc_command(ipa, IPA_UC_COMMAND_ERR_FATAL, 0);
+
+ /* give uc enough time to save state */
+ udelay(IPA_SEND_DELAY);
+}
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
new file mode 100644
index 000000000000..e8510899a3f0
--- /dev/null
+++ b/drivers/net/ipa/ipa_uc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_UC_H_
+#define _IPA_UC_H_
+
+struct ipa;
+
+/**
+ * ipa_uc_setup() - set up the IPA microcontroller subsystem
+ * @ipa: IPA pointer
+ */
+void ipa_uc_setup(struct ipa *ipa);
+
+/**
+ * ipa_uc_teardown() - inverse of ipa_uc_setup()
+ * @ipa: IPA pointer
+ */
+void ipa_uc_teardown(struct ipa *ipa);
+
+/**
+ * ipa_uc_panic_notifier() - Notify the microcontroller the system is crashing
+ * @ipa: IPA pointer
+ *
+ * Notifier function called when the system crashes, to inform the
+ * microcontroller of the event.
+ */
+void ipa_uc_panic_notifier(struct ipa *ipa);
+
+#endif /* _IPA_UC_H_ */
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
new file mode 100644
index 000000000000..85449df0f512
--- /dev/null
+++ b/drivers/net/ipa/ipa_version.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_VERSION_H_
+#define _IPA_VERSION_H_
+
+/**
+ * enum ipa_version
+ *
+ * Defines the version of IPA (and GSI) hardware present on the platform.
+ * It seems this might be better defined elsewhere, but having it here gets
+ * it where it's needed.
+ */
+enum ipa_version {
+ IPA_VERSION_3_5_1, /* GSI version 1.3.0 */
+ IPA_VERSION_4_0, /* GSI version 2.0 */
+ IPA_VERSION_4_1, /* GSI version 2.1 */
+ IPA_VERSION_4_2, /* GSI version 2.2 */
+};
+
+#endif /* _IPA_VERSION_H_ */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 30cd0c4f0be0..8801d093135c 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -293,6 +293,7 @@ void ipvlan_process_multicast(struct work_struct *work)
}
if (dev)
dev_put(dev);
+ cond_resched();
}
}
@@ -498,19 +499,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
- /* In this mode we dont care about multicast and broadcast traffic */
- if (is_multicast_ether_addr(ethh->h_dest)) {
- pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
- ntohs(skb->protocol));
- kfree_skb(skb);
- goto out;
- }
-
/* The ipvlan is a pseudo-L2 device, so the packets that we receive
* will have L2; which need to discarded and processed further
* in the net-ns of the main-device.
*/
if (skb_mac_header_was_set(skb)) {
+ /* In this mode we dont care about
+ * multicast and broadcast traffic */
+ if (is_multicast_ether_addr(ethh->h_dest)) {
+ pr_debug_ratelimited(
+ "Dropped {multi|broad}cast of type=[%x]\n",
+ ntohs(skb->protocol));
+ kfree_skb(skb);
+ goto out;
+ }
+
skb_pull(skb, sizeof(*ethh));
skb->mac_header = (typeof(skb->mac_header))~0U;
skb_reset_network_header(skb);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index a70662261a5a..f195f278a83a 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -164,7 +164,6 @@ static void ipvlan_uninit(struct net_device *dev)
static int ipvlan_open(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_addr *addr;
if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
@@ -178,7 +177,7 @@ static int ipvlan_open(struct net_device *dev)
ipvlan_ht_addr_add(ipvlan, addr);
rcu_read_unlock();
- return dev_uc_add(phy_dev, phy_dev->dev_addr);
+ return 0;
}
static int ipvlan_stop(struct net_device *dev)
@@ -190,8 +189,6 @@ static int ipvlan_stop(struct net_device *dev)
dev_uc_unsync(phy_dev, dev);
dev_mc_unsync(phy_dev, dev);
- dev_uc_del(phy_dev, phy_dev->dev_addr);
-
rcu_read_lock();
list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_del(addr);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 45bfd99f17fa..6ec6fc191a6e 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -424,6 +424,11 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
return (struct macsec_eth_header *)skb_mac_header(skb);
}
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+ return make_sci(dev->dev_addr, port);
+}
+
static void __macsec_pn_wrapped(struct macsec_secy *secy,
struct macsec_tx_sa *tx_sa)
{
@@ -3268,6 +3273,20 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
out:
ether_addr_copy(dev->dev_addr, addr->sa_data);
+ macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
+
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.secy = &macsec->secy;
+ macsec_offload(ops->mdo_upd_secy, &ctx);
+ }
+ }
+
return 0;
}
@@ -3342,6 +3361,7 @@ static const struct device_type macsec_type = {
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
+ [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
@@ -3592,11 +3612,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
return false;
}
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
- return make_sci(dev->dev_addr, port);
-}
-
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
struct macsec_dev *macsec = macsec_priv(dev);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 81aa7adf4801..e7289d67268f 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -334,6 +334,8 @@ static void macvlan_process_broadcast(struct work_struct *w)
if (src)
dev_put(src->dev);
consume_skb(skb);
+
+ cond_resched();
}
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index edb1cb8a228e..cc7f1df855da 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -157,6 +157,14 @@ config MDIO_I2C
This is library mode.
+config MDIO_IPQ8064
+ tristate "Qualcomm IPQ8064 MDIO interface support"
+ depends on HAS_IOMEM && OF_MDIO
+ depends on MFD_SYSCON
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the IPQ8064 SoC.
+
config MDIO_MOXART
tristate "MOXA ART MDIO interface support"
depends on ARCH_MOXART || COMPILE_TEST
@@ -206,6 +214,12 @@ config MDIO_XGENE
This module provides a driver for the MDIO busses found in the
APM X-Gene SoC's.
+config MDIO_XPCS
+ tristate "Synopsys DesignWare XPCS controller"
+ help
+ This module provides helper functions for Synopsys DesignWare XPCS
+ controllers.
+
endif
endif
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index d523fd5670e4..26f8039f300f 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -37,12 +37,14 @@ obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
+obj-$(CONFIG_MDIO_IPQ8064) += mdio-ipq8064.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
+obj-$(CONFIG_MDIO_XPCS) += mdio-xpcs.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 23f1958ba6ad..459fb2069c7e 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -73,6 +73,7 @@ static struct phy_driver bcm63xx_driver[] = {
/* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
.phy_id_mask = 0xfffffc00,
+ .name = "Broadcom BCM63XX (2)",
/* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 9a4e12a2af07..7e05b92504f0 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -23,6 +23,7 @@
* link takes priority and the other port is completely locked out.
*/
#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/hwmon.h>
#include <linux/marvell_phy.h>
#include <linux/phy.h>
@@ -39,6 +40,16 @@ enum {
MV_PCS_BASE_R = 0x1000,
MV_PCS_1000BASEX = 0x2000,
+ MV_PCS_CSCR1 = 0x8000,
+ MV_PCS_CSCR1_ED_MASK = 0x0300,
+ MV_PCS_CSCR1_ED_OFF = 0x0000,
+ MV_PCS_CSCR1_ED_RX = 0x0200,
+ MV_PCS_CSCR1_ED_NLP = 0x0300,
+ MV_PCS_CSCR1_MDIX_MASK = 0x0060,
+ MV_PCS_CSCR1_MDIX_MDI = 0x0000,
+ MV_PCS_CSCR1_MDIX_MDIX = 0x0020,
+ MV_PCS_CSCR1_MDIX_AUTO = 0x0060,
+
MV_PCS_CSSR1 = 0x8008,
MV_PCS_CSSR1_SPD1_MASK = 0xc000,
MV_PCS_CSSR1_SPD1_SPD2 = 0xc000,
@@ -216,6 +227,91 @@ static int mv3310_hwmon_probe(struct phy_device *phydev)
}
#endif
+static int mv3310_power_down(struct phy_device *phydev)
+{
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
+}
+
+static int mv3310_power_up(struct phy_device *phydev)
+{
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
+}
+
+static int mv3310_reset(struct phy_device *phydev, u32 unit)
+{
+ int retries, val, err;
+
+ err = phy_modify_mmd(phydev, MDIO_MMD_PCS, unit + MDIO_CTRL1,
+ MDIO_CTRL1_RESET, MDIO_CTRL1_RESET);
+ if (err < 0)
+ return err;
+
+ retries = 20;
+ do {
+ msleep(5);
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, unit + MDIO_CTRL1);
+ if (val < 0)
+ return val;
+ } while (val & MDIO_CTRL1_RESET && --retries);
+
+ return val & MDIO_CTRL1_RESET ? -ETIMEDOUT : 0;
+}
+
+static int mv3310_get_edpd(struct phy_device *phydev, u16 *edpd)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSCR1);
+ if (val < 0)
+ return val;
+
+ switch (val & MV_PCS_CSCR1_ED_MASK) {
+ case MV_PCS_CSCR1_ED_NLP:
+ *edpd = 1000;
+ break;
+ case MV_PCS_CSCR1_ED_RX:
+ *edpd = ETHTOOL_PHY_EDPD_NO_TX;
+ break;
+ default:
+ *edpd = ETHTOOL_PHY_EDPD_DISABLE;
+ break;
+ }
+ return 0;
+}
+
+static int mv3310_set_edpd(struct phy_device *phydev, u16 edpd)
+{
+ u16 val;
+ int err;
+
+ switch (edpd) {
+ case 1000:
+ case ETHTOOL_PHY_EDPD_DFLT_TX_MSECS:
+ val = MV_PCS_CSCR1_ED_NLP;
+ break;
+
+ case ETHTOOL_PHY_EDPD_NO_TX:
+ val = MV_PCS_CSCR1_ED_RX;
+ break;
+
+ case ETHTOOL_PHY_EDPD_DISABLE:
+ val = MV_PCS_CSCR1_ED_OFF;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ err = phy_modify_mmd_changed(phydev, MDIO_MMD_PCS, MV_PCS_CSCR1,
+ MV_PCS_CSCR1_ED_MASK, val);
+ if (err > 0)
+ err = mv3310_reset(phydev, MV_PCS_BASE_T);
+
+ return err;
+}
+
static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
@@ -264,6 +360,11 @@ static int mv3310_probe(struct phy_device *phydev)
dev_set_drvdata(&phydev->mdio.dev, priv);
+ /* Powering down the port when not in use saves about 600mW */
+ ret = mv3310_power_down(phydev);
+ if (ret)
+ return ret;
+
ret = mv3310_hwmon_probe(phydev);
if (ret)
return ret;
@@ -273,16 +374,14 @@ static int mv3310_probe(struct phy_device *phydev)
static int mv3310_suspend(struct phy_device *phydev)
{
- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
- MV_V2_PORT_CTRL_PWRDOWN);
+ return mv3310_power_down(phydev);
}
static int mv3310_resume(struct phy_device *phydev)
{
int ret;
- ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
- MV_V2_PORT_CTRL_PWRDOWN);
+ ret = mv3310_power_up(phydev);
if (ret)
return ret;
@@ -308,6 +407,8 @@ static bool mv3310_has_pma_ngbaset_quirk(struct phy_device *phydev)
static int mv3310_config_init(struct phy_device *phydev)
{
+ int err;
+
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
@@ -316,7 +417,15 @@ static int mv3310_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_10GBASER)
return -ENODEV;
- return 0;
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ /* Power up so reset works */
+ err = mv3310_power_up(phydev);
+ if (err)
+ return err;
+
+ /* Enable EDPD mode - saving 600mW */
+ return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
}
static int mv3310_get_features(struct phy_device *phydev)
@@ -345,14 +454,42 @@ static int mv3310_get_features(struct phy_device *phydev)
return 0;
}
+static int mv3310_config_mdix(struct phy_device *phydev)
+{
+ u16 val;
+ int err;
+
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI_AUTO:
+ val = MV_PCS_CSCR1_MDIX_AUTO;
+ break;
+ case ETH_TP_MDI_X:
+ val = MV_PCS_CSCR1_MDIX_MDIX;
+ break;
+ case ETH_TP_MDI:
+ val = MV_PCS_CSCR1_MDIX_MDI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = phy_modify_mmd_changed(phydev, MDIO_MMD_PCS, MV_PCS_CSCR1,
+ MV_PCS_CSCR1_MDIX_MASK, val);
+ if (err > 0)
+ err = mv3310_reset(phydev, MV_PCS_BASE_T);
+
+ return err;
+}
+
static int mv3310_config_aneg(struct phy_device *phydev)
{
bool changed = false;
u16 reg;
int ret;
- /* We don't support manual MDI control */
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ ret = mv3310_config_mdix(phydev);
+ if (ret < 0)
+ return ret;
if (phydev->autoneg == AUTONEG_DISABLE)
return genphy_c45_pma_setup_forced(phydev);
@@ -537,6 +674,28 @@ static int mv3310_read_status(struct phy_device *phydev)
return 0;
}
+static int mv3310_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD:
+ return mv3310_get_edpd(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv3310_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD:
+ return mv3310_set_edpd(phydev, *(u16 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88X3310,
@@ -551,6 +710,8 @@ static struct phy_driver mv3310_drivers[] = {
.config_aneg = mv3310_config_aneg,
.aneg_done = mv3310_aneg_done,
.read_status = mv3310_read_status,
+ .get_tunable = mv3310_get_tunable,
+ .set_tunable = mv3310_set_tunable,
},
{
.phy_id = MARVELL_PHY_ID_88E2110,
@@ -564,6 +725,8 @@ static struct phy_driver mv3310_drivers[] = {
.config_aneg = mv3310_config_aneg,
.aneg_done = mv3310_aneg_done,
.read_status = mv3310_read_status,
+ .get_tunable = mv3310_get_tunable,
+ .set_tunable = mv3310_set_tunable,
},
};
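
The new .get_tunable/.set_tunable hooks expose EDPD through the generic ethtool PHY-tunable interface, so no driver-private knob is needed. A minimal userspace sketch — "eth0" is a hypothetical interface name and error handling is abbreviated — reading the tunable back via the SIOCETHTOOL ioctl:

#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct {
		struct ethtool_tunable hdr;
		__u16 edpd;	/* ETHTOOL_TUNABLE_U16 payload follows the header */
	} req = {
		.hdr = {
			.cmd	 = ETHTOOL_PHY_GTUNABLE,
			.id	 = ETHTOOL_PHY_EDPD,
			.type_id = ETHTOOL_TUNABLE_U16,
			.len	 = sizeof(__u16),
		},
	};
	struct ifreq ifr = { .ifr_data = (void *)&req };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("EDPD: %u\n", req.edpd);	/* 1000 == NLP pulse every 1000ms */
	close(fd);
	return 0;
}

The special ETHTOOL_PHY_EDPD_DISABLE / ETHTOOL_PHY_EDPD_NO_TX values map back onto the MV_PCS_CSCR1_ED_* register fields handled in mv3310_get_edpd()/mv3310_set_edpd() above.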
diff --git a/drivers/net/phy/mdio-ipq8064.c b/drivers/net/phy/mdio-ipq8064.c
new file mode 100644
index 000000000000..1bd18857e1c5
--- /dev/null
+++ b/drivers/net/phy/mdio-ipq8064.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Qualcomm IPQ8064 MDIO interface driver
+ *
+ * Copyright (C) 2019 Christian Lamparter <chunkeey@gmail.com>
+ * Copyright (C) 2020 Ansuel Smith <ansuelsmth@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+
+/* MII address register definitions */
+#define MII_ADDR_REG_ADDR 0x10
+#define MII_BUSY BIT(0)
+#define MII_WRITE BIT(1)
+#define MII_CLKRANGE_60_100M (0 << 2)
+#define MII_CLKRANGE_100_150M (1 << 2)
+#define MII_CLKRANGE_20_35M (2 << 2)
+#define MII_CLKRANGE_35_60M (3 << 2)
+#define MII_CLKRANGE_150_250M (4 << 2)
+#define MII_CLKRANGE_250_300M (5 << 2)
+#define MII_CLKRANGE_MASK GENMASK(4, 2)
+#define MII_REG_SHIFT 6
+#define MII_REG_MASK GENMASK(10, 6)
+#define MII_ADDR_SHIFT 11
+#define MII_ADDR_MASK GENMASK(15, 11)
+
+#define MII_DATA_REG_ADDR 0x14
+
+#define MII_MDIO_DELAY_USEC (1000)
+#define MII_MDIO_RETRY_MSEC (10)
+
+struct ipq8064_mdio {
+ struct regmap *base; /* NSS_GMAC0_BASE */
+};
+
+static int
+ipq8064_mdio_wait_busy(struct ipq8064_mdio *priv)
+{
+ u32 busy;
+
+ return regmap_read_poll_timeout(priv->base, MII_ADDR_REG_ADDR, busy,
+ !(busy & MII_BUSY), MII_MDIO_DELAY_USEC,
+ MII_MDIO_RETRY_MSEC * USEC_PER_MSEC);
+}
+
+static int
+ipq8064_mdio_read(struct mii_bus *bus, int phy_addr, int reg_offset)
+{
+ u32 miiaddr = MII_BUSY | MII_CLKRANGE_250_300M;
+ struct ipq8064_mdio *priv = bus->priv;
+ u32 ret_val;
+ int err;
+
+ /* Reject clause 45 */
+ if (reg_offset & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) |
+ ((reg_offset << MII_REG_SHIFT) & MII_REG_MASK);
+
+ regmap_write(priv->base, MII_ADDR_REG_ADDR, miiaddr);
+ usleep_range(8, 10);
+
+ err = ipq8064_mdio_wait_busy(priv);
+ if (err)
+ return err;
+
+ regmap_read(priv->base, MII_DATA_REG_ADDR, &ret_val);
+ return (int)ret_val;
+}
+
+static int
+ipq8064_mdio_write(struct mii_bus *bus, int phy_addr, int reg_offset, u16 data)
+{
+ u32 miiaddr = MII_WRITE | MII_BUSY | MII_CLKRANGE_250_300M;
+ struct ipq8064_mdio *priv = bus->priv;
+
+ /* Reject clause 45 */
+ if (reg_offset & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ regmap_write(priv->base, MII_DATA_REG_ADDR, data);
+
+ miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) |
+ ((reg_offset << MII_REG_SHIFT) & MII_REG_MASK);
+
+ regmap_write(priv->base, MII_ADDR_REG_ADDR, miiaddr);
+ usleep_range(8, 10);
+
+ return ipq8064_mdio_wait_busy(priv);
+}
+
+static int
+ipq8064_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ipq8064_mdio *priv;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "ipq8064_mdio_bus";
+ bus->read = ipq8064_mdio_read;
+ bus->write = ipq8064_mdio_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+ bus->parent = &pdev->dev;
+
+ priv = bus->priv;
+ priv->base = device_node_to_regmap(np);
+ if (IS_ERR(priv->base)) {
+ if (priv->base == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ dev_err(&pdev->dev, "error getting device regmap, error=%pe\n",
+ priv->base);
+ return PTR_ERR(priv->base);
+ }
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, bus);
+ return 0;
+}
+
+static int
+ipq8064_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+
+ return 0;
+}
+
+static const struct of_device_id ipq8064_mdio_dt_ids[] = {
+ { .compatible = "qcom,ipq8064-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq8064_mdio_dt_ids);
+
+static struct platform_driver ipq8064_mdio_driver = {
+ .probe = ipq8064_mdio_probe,
+ .remove = ipq8064_mdio_remove,
+ .driver = {
+ .name = "ipq8064-mdio",
+ .of_match_table = ipq8064_mdio_dt_ids,
+ },
+};
+
+module_platform_driver(ipq8064_mdio_driver);
+
+MODULE_DESCRIPTION("Qualcomm IPQ8064 MDIO interface driver");
+MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>");
+MODULE_AUTHOR("Ansuel Smith <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-xpcs.c b/drivers/net/phy/mdio-xpcs.c
new file mode 100644
index 000000000000..973f588146f7
--- /dev/null
+++ b/drivers/net/phy/mdio-xpcs.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XPCS helpers
+ *
+ * Author: Jose Abreu <Jose.Abreu@synopsys.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/mdio.h>
+#include <linux/mdio-xpcs.h>
+#include <linux/phylink.h>
+#include <linux/workqueue.h>
+
+#define SYNOPSYS_XPCS_USXGMII_ID 0x7996ced0
+#define SYNOPSYS_XPCS_10GKR_ID 0x7996ced0
+#define SYNOPSYS_XPCS_MASK 0xffffffff
+
+/* Vendor regs access */
+#define DW_VENDOR BIT(15)
+
+/* VR_XS_PCS */
+#define DW_USXGMII_RST BIT(10)
+#define DW_USXGMII_EN BIT(9)
+#define DW_VR_XS_PCS_DIG_STS 0x0010
+#define DW_RXFIFO_ERR GENMASK(6, 5)
+
+/* SR_MII */
+#define DW_USXGMII_FULL BIT(8)
+#define DW_USXGMII_SS_MASK (BIT(13) | BIT(6) | BIT(5))
+#define DW_USXGMII_10000 (BIT(13) | BIT(6))
+#define DW_USXGMII_5000 (BIT(13) | BIT(5))
+#define DW_USXGMII_2500 (BIT(5))
+#define DW_USXGMII_1000 (BIT(6))
+#define DW_USXGMII_100 (BIT(13))
+#define DW_USXGMII_10 (0)
+
+/* SR_AN */
+#define DW_SR_AN_ADV1 0x10
+#define DW_SR_AN_ADV2 0x11
+#define DW_SR_AN_ADV3 0x12
+#define DW_SR_AN_LP_ABL1 0x13
+#define DW_SR_AN_LP_ABL2 0x14
+#define DW_SR_AN_LP_ABL3 0x15
+
+/* Clause 73 Defines */
+/* AN_LP_ABL1 */
+#define DW_C73_PAUSE BIT(10)
+#define DW_C73_ASYM_PAUSE BIT(11)
+#define DW_C73_AN_ADV_SF 0x1
+/* AN_LP_ABL2 */
+#define DW_C73_1000KX BIT(5)
+#define DW_C73_10000KX4 BIT(6)
+#define DW_C73_10000KR BIT(7)
+/* AN_LP_ABL3 */
+#define DW_C73_2500KX BIT(0)
+#define DW_C73_5000KR BIT(1)
+
+static const int xpcs_usxgmii_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const int xpcs_10gkr_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const phy_interface_t xpcs_usxgmii_interfaces[] = {
+ PHY_INTERFACE_MODE_USXGMII,
+ PHY_INTERFACE_MODE_MAX,
+};
+
+static const phy_interface_t xpcs_10gkr_interfaces[] = {
+ PHY_INTERFACE_MODE_10GKR,
+ PHY_INTERFACE_MODE_MAX,
+};
+
+static struct xpcs_id {
+ u32 id;
+ u32 mask;
+ const int *supported;
+ const phy_interface_t *interface;
+} xpcs_id_list[] = {
+ {
+ .id = SYNOPSYS_XPCS_USXGMII_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .supported = xpcs_usxgmii_features,
+ .interface = xpcs_usxgmii_interfaces,
+ }, {
+ .id = SYNOPSYS_XPCS_10GKR_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .supported = xpcs_10gkr_features,
+ .interface = xpcs_10gkr_interfaces,
+ },
+};
+
+static int xpcs_read(struct mdio_xpcs_args *xpcs, int dev, u32 reg)
+{
+ u32 reg_addr = MII_ADDR_C45 | dev << 16 | reg;
+
+ return mdiobus_read(xpcs->bus, xpcs->addr, reg_addr);
+}
+
+static int xpcs_write(struct mdio_xpcs_args *xpcs, int dev, u32 reg, u16 val)
+{
+ u32 reg_addr = MII_ADDR_C45 | dev << 16 | reg;
+
+ return mdiobus_write(xpcs->bus, xpcs->addr, reg_addr, val);
+}
+
+static int xpcs_read_vendor(struct mdio_xpcs_args *xpcs, int dev, u32 reg)
+{
+ return xpcs_read(xpcs, dev, DW_VENDOR | reg);
+}
+
+static int xpcs_write_vendor(struct mdio_xpcs_args *xpcs, int dev, int reg,
+ u16 val)
+{
+ return xpcs_write(xpcs, dev, DW_VENDOR | reg, val);
+}
+
+static int xpcs_read_vpcs(struct mdio_xpcs_args *xpcs, int reg)
+{
+ return xpcs_read_vendor(xpcs, MDIO_MMD_PCS, reg);
+}
+
+static int xpcs_write_vpcs(struct mdio_xpcs_args *xpcs, int reg, u16 val)
+{
+ return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val);
+}
+
+static int xpcs_poll_reset(struct mdio_xpcs_args *xpcs, int dev)
+{
+ /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
+ unsigned int retries = 12;
+ int ret;
+
+ do {
+ msleep(50);
+ ret = xpcs_read(xpcs, dev, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+ } while (ret & MDIO_CTRL1_RESET && --retries);
+
+ return (ret & MDIO_CTRL1_RESET) ? -ETIMEDOUT : 0;
+}
+
+static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs, int dev)
+{
+ int ret;
+
+ ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
+ if (ret < 0)
+ return ret;
+
+ return xpcs_poll_reset(xpcs, dev);
+}
+
+#define xpcs_warn(__xpcs, __state, __args...) \
+({ \
+ if ((__state)->link) \
+ dev_warn(&(__xpcs)->bus->dev, ##__args); \
+})
+
+static int xpcs_read_fault(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_STAT1_FAULT) {
+ xpcs_warn(xpcs, state, "Link fault condition detected!\n");
+ return -EFAULT;
+ }
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_STAT2_RXFAULT)
+ xpcs_warn(xpcs, state, "Receiver fault detected!\n");
+ if (ret & MDIO_STAT2_TXFAULT)
+ xpcs_warn(xpcs, state, "Transmitter fault detected!\n");
+
+ ret = xpcs_read_vendor(xpcs, MDIO_MMD_PCS, DW_VR_XS_PCS_DIG_STS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_RXFIFO_ERR) {
+ xpcs_warn(xpcs, state, "FIFO fault condition detected!\n");
+ return -EFAULT;
+ }
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_PCS_10GBRT_STAT1_BLKLK))
+ xpcs_warn(xpcs, state, "Link is not locked!\n");
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_PCS_10GBRT_STAT2_ERR)
+ xpcs_warn(xpcs, state, "Link has errors!\n");
+
+ return 0;
+}
+
+static int xpcs_read_link(struct mdio_xpcs_args *xpcs, bool an)
+{
+ bool link = true;
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_STAT1_LSTATUS))
+ link = false;
+
+ if (an) {
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_STAT1_LSTATUS))
+ link = false;
+ }
+
+ return link;
+}
+
+static int xpcs_get_max_usxgmii_speed(const unsigned long *supported)
+{
+ int max = SPEED_UNKNOWN;
+
+ if (phylink_test(supported, 1000baseKX_Full))
+ max = SPEED_1000;
+ if (phylink_test(supported, 2500baseX_Full))
+ max = SPEED_2500;
+ if (phylink_test(supported, 10000baseKX4_Full))
+ max = SPEED_10000;
+ if (phylink_test(supported, 10000baseKR_Full))
+ max = SPEED_10000;
+
+ return max;
+}
+
+static int xpcs_config_usxgmii(struct mdio_xpcs_args *xpcs, int speed)
+{
+ int ret, speed_sel;
+
+ switch (speed) {
+ case SPEED_10:
+ speed_sel = DW_USXGMII_10;
+ break;
+ case SPEED_100:
+ speed_sel = DW_USXGMII_100;
+ break;
+ case SPEED_1000:
+ speed_sel = DW_USXGMII_1000;
+ break;
+ case SPEED_2500:
+ speed_sel = DW_USXGMII_2500;
+ break;
+ case SPEED_5000:
+ speed_sel = DW_USXGMII_5000;
+ break;
+ case SPEED_10000:
+ speed_sel = DW_USXGMII_10000;
+ break;
+ default:
+ /* unsupported speed */
+ return -EINVAL;
+ }
+
+ ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_EN);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~DW_USXGMII_SS_MASK;
+ ret |= speed_sel | DW_USXGMII_FULL;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, ret);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ return xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST);
+}
+
+static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
+{
+ int ret, adv;
+
+ /* By default, in USXGMII mode XPCS operates at 10G baud and
+ * replicates data to achieve lower speeds. Hence, in this
+ * default configuration we need to advertise all supported
+ * modes and not only the ones we want to use.
+ */
+
+ /* SR_AN_ADV3 */
+ adv = 0;
+ if (phylink_test(xpcs->supported, 2500baseX_Full))
+ adv |= DW_C73_2500KX;
+
+ /* TODO: 5000baseKR */
+
+ ret = xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV3, adv);
+ if (ret < 0)
+ return ret;
+
+ /* SR_AN_ADV2 */
+ adv = 0;
+ if (phylink_test(xpcs->supported, 1000baseKX_Full))
+ adv |= DW_C73_1000KX;
+ if (phylink_test(xpcs->supported, 10000baseKX4_Full))
+ adv |= DW_C73_10000KX4;
+ if (phylink_test(xpcs->supported, 10000baseKR_Full))
+ adv |= DW_C73_10000KR;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV2, adv);
+ if (ret < 0)
+ return ret;
+
+ /* SR_AN_ADV1 */
+ adv = DW_C73_AN_ADV_SF;
+ if (phylink_test(xpcs->supported, Pause))
+ adv |= DW_C73_PAUSE;
+ if (phylink_test(xpcs->supported, Asym_Pause))
+ adv |= DW_C73_ASYM_PAUSE;
+
+ return xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV1, adv);
+}
+
+static int xpcs_config_aneg(struct mdio_xpcs_args *xpcs)
+{
+ int ret;
+
+ ret = xpcs_config_aneg_c73(xpcs);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;
+
+ return xpcs_write(xpcs, MDIO_MMD_AN, MDIO_CTRL1, ret);
+}
+
+static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_AN_STAT1_COMPLETE) {
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL1);
+ if (ret < 0)
+ return ret;
+
+ /* Check if Aneg outcome is valid */
+ if (!(ret & DW_C73_AN_ADV_SF))
+ return 0;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int xpcs_read_lpa(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_AN_STAT1_LPABLE)) {
+ phylink_clear(state->lp_advertising, Autoneg);
+ return 0;
+ }
+
+ phylink_set(state->lp_advertising, Autoneg);
+
+ /* Clause 73 outcome */
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL3);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_C73_2500KX)
+ phylink_set(state->lp_advertising, 2500baseX_Full);
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_C73_1000KX)
+ phylink_set(state->lp_advertising, 1000baseKX_Full);
+ if (ret & DW_C73_10000KX4)
+ phylink_set(state->lp_advertising, 10000baseKX4_Full);
+ if (ret & DW_C73_10000KR)
+ phylink_set(state->lp_advertising, 10000baseKR_Full);
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_C73_PAUSE)
+ phylink_set(state->lp_advertising, Pause);
+ if (ret & DW_C73_ASYM_PAUSE)
+ phylink_set(state->lp_advertising, Asym_Pause);
+
+ linkmode_and(state->lp_advertising, state->lp_advertising,
+ state->advertising);
+ return 0;
+}
+
+static void xpcs_resolve_lpa(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int max_speed = xpcs_get_max_usxgmii_speed(state->lp_advertising);
+
+ state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
+ state->speed = max_speed;
+ state->duplex = DUPLEX_FULL;
+}
+
+static void xpcs_resolve_pma(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
+ state->duplex = DUPLEX_FULL;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_10GKR:
+ state->speed = SPEED_10000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static int xpcs_validate(struct mdio_xpcs_args *xpcs,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ linkmode_and(supported, supported, xpcs->supported);
+ linkmode_and(state->advertising, state->advertising, xpcs->supported);
+ return 0;
+}
+
+static int xpcs_config(struct mdio_xpcs_args *xpcs,
+ const struct phylink_link_state *state)
+{
+ int ret;
+
+ if (state->an_enabled) {
+ ret = xpcs_config_aneg(xpcs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xpcs_get_state(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ /* Link needs to be read first ... */
+ state->link = xpcs_read_link(xpcs, state->an_enabled) > 0 ? 1 : 0;
+
+ /* ... and then we check the faults. */
+ ret = xpcs_read_fault(xpcs, state);
+ if (ret) {
+ ret = xpcs_soft_reset(xpcs, MDIO_MMD_PCS);
+ if (ret)
+ return ret;
+
+ state->link = 0;
+
+ return xpcs_config(xpcs, state);
+ }
+
+ if (state->link && state->an_enabled && xpcs_aneg_done(xpcs, state)) {
+ state->an_complete = true;
+ xpcs_read_lpa(xpcs, state);
+ xpcs_resolve_lpa(xpcs, state);
+ } else if (state->link) {
+ xpcs_resolve_pma(xpcs, state);
+ }
+
+ return 0;
+}
+
+static int xpcs_link_up(struct mdio_xpcs_args *xpcs, int speed,
+ phy_interface_t interface)
+{
+ if (interface == PHY_INTERFACE_MODE_USXGMII)
+ return xpcs_config_usxgmii(xpcs, speed);
+
+ return 0;
+}
+
+static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs)
+{
+ int ret;
+ u32 id;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID1);
+ if (ret < 0)
+ return 0xffffffff;
+
+ id = ret << 16;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID2);
+ if (ret < 0)
+ return 0xffffffff;
+
+ return id | ret;
+}
+
+static bool xpcs_check_features(struct mdio_xpcs_args *xpcs,
+ struct xpcs_id *match,
+ phy_interface_t interface)
+{
+ int i;
+
+ for (i = 0; match->interface[i] != PHY_INTERFACE_MODE_MAX; i++) {
+ if (match->interface[i] == interface)
+ break;
+ }
+
+ if (match->interface[i] == PHY_INTERFACE_MODE_MAX)
+ return false;
+
+ for (i = 0; match->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ set_bit(match->supported[i], xpcs->supported);
+
+ return true;
+}
+
+static int xpcs_probe(struct mdio_xpcs_args *xpcs, phy_interface_t interface)
+{
+ u32 xpcs_id = xpcs_get_id(xpcs);
+ struct xpcs_id *match = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xpcs_id_list); i++) {
+ struct xpcs_id *entry = &xpcs_id_list[i];
+
+ if ((xpcs_id & entry->mask) == entry->id) {
+ match = entry;
+
+ if (xpcs_check_features(xpcs, match, interface))
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+static struct mdio_xpcs_ops xpcs_ops = {
+ .validate = xpcs_validate,
+ .config = xpcs_config,
+ .get_state = xpcs_get_state,
+ .link_up = xpcs_link_up,
+ .probe = xpcs_probe,
+};
+
+struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
+{
+ return &xpcs_ops;
+}
+EXPORT_SYMBOL_GPL(mdio_xpcs_get_ops);
+
+MODULE_LICENSE("GPL v2");
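
The ops table is the whole public surface of this helper: a MAC driver obtains it with mdio_xpcs_get_ops() and drives the XPCS through it. A hedged sketch of a consumer — the bus pointer and MDIO address 16 are illustrative assumptions, with stmmac being the expected in-tree user:

static int my_mac_attach_xpcs(struct mii_bus *bus,
			      const struct phylink_link_state *state)
{
	static struct mdio_xpcs_args xpcs;
	struct mdio_xpcs_ops *ops = mdio_xpcs_get_ops();
	int ret;

	xpcs.bus = bus;		/* host MDIO bus carrying the XPCS */
	xpcs.addr = 16;		/* board-specific XPCS MDIO address */

	ret = ops->probe(&xpcs, PHY_INTERFACE_MODE_USXGMII);
	if (ret)
		return ret;	/* ID or feature mismatch */

	return ops->config(&xpcs, state);
}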
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index d24577de0775..b2eac7ee0288 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -80,10 +80,16 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_EXT_PHY_CNTL_2 24
#define MII_VSC85XX_INT_MASK 25
-#define MII_VSC85XX_INT_MASK_MASK 0xa020
-#define MII_VSC85XX_INT_MASK_WOL 0x0040
+#define MII_VSC85XX_INT_MASK_MDINT BIT(15)
+#define MII_VSC85XX_INT_MASK_LINK_CHG BIT(13)
+#define MII_VSC85XX_INT_MASK_WOL BIT(6)
+#define MII_VSC85XX_INT_MASK_EXT BIT(5)
#define MII_VSC85XX_INT_STATUS 26
+#define MII_VSC85XX_INT_MASK_MASK (MII_VSC85XX_INT_MASK_MDINT | \
+ MII_VSC85XX_INT_MASK_LINK_CHG | \
+ MII_VSC85XX_INT_MASK_EXT)
+
#define MSCC_PHY_WOL_MAC_CONTROL 27
#define EDGE_RATE_CNTL_POS 5
#define EDGE_RATE_CNTL_MASK 0x00E0
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d76e038cf2cb..355bfdef48d2 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -727,7 +727,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
phy_trigger_machine(phydev);
}
- if (phy_clear_interrupt(phydev))
+ /* did_interrupt() may have cleared the interrupt already */
+ if (!phydev->drv->did_interrupt && phy_clear_interrupt(phydev))
goto phy_err;
return IRQ_HANDLED;
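
The guard matters because a did_interrupt() implementation typically reads a latched, clear-on-read status register; letting the core call phy_clear_interrupt() afterwards would issue a second ack that some PHYs treat as a spurious access. A sketch of such a callback — MY_PHY_INTSTAT and MY_PHY_INT_PENDING are made-up names for illustration:

static int my_phy_did_interrupt(struct phy_device *phydev)
{
	/* reading the status register also acks the interrupt */
	int val = phy_read(phydev, MY_PHY_INTSTAT);

	return val >= 0 && (val & MY_PHY_INT_PENDING);
}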
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 7c00b029ebfb..a585faf8b844 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -286,6 +286,8 @@ static int mdio_bus_phy_suspend(struct device *dev)
if (!mdio_bus_phy_may_suspend(phydev))
return 0;
+ phydev->suspended_by_mdio_bus = 1;
+
return phy_suspend(phydev);
}
@@ -294,9 +296,11 @@ static int mdio_bus_phy_resume(struct device *dev)
struct phy_device *phydev = to_phy_device(dev);
int ret;
- if (!mdio_bus_phy_may_suspend(phydev))
+ if (!phydev->suspended_by_mdio_bus)
goto no_resume;
+ phydev->suspended_by_mdio_bus = 0;
+
ret = phy_resume(phydev);
if (ret < 0)
return ret;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index b4367fab7899..a8eeaabb2d18 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -312,11 +312,13 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 1000baseT_Half);
phylink_set(pl->supported, 1000baseT_Full);
phylink_set(pl->supported, 1000baseX_Full);
+ phylink_set(pl->supported, 1000baseKX_Full);
phylink_set(pl->supported, 2500baseT_Full);
phylink_set(pl->supported, 2500baseX_Full);
phylink_set(pl->supported, 5000baseT_Full);
phylink_set(pl->supported, 10000baseT_Full);
phylink_set(pl->supported, 10000baseKR_Full);
+ phylink_set(pl->supported, 10000baseKX4_Full);
phylink_set(pl->supported, 10000baseCR_Full);
phylink_set(pl->supported, 10000baseSR_Full);
phylink_set(pl->supported, 10000baseLR_Full);
@@ -338,6 +340,9 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
"failed to validate link configuration for in-band status\n");
return -EINVAL;
}
+
+ /* Check if MAC/PCS also supports Autoneg. */
+ pl->link_config.an_enabled = phylink_test(pl->supported, Autoneg);
}
return 0;
@@ -797,8 +802,14 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
config.interface = interface;
ret = phylink_validate(pl, supported, &config);
- if (ret)
+ if (ret) {
+ phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %d\n",
+ phy_modes(config.interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, phy->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising,
+ ret);
return ret;
+ }
phy->phylink = pl;
phy->phy_link_change = phylink_phy_change;
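
phylink_test() is a thin wrapper around linkmode_test_bit() over the ETHTOOL_LINK_MODE_*_BIT namespace, so the new an_enabled assignment is simply a bit-test against whatever mask survived the MAC's validate callback. Written out with the underlying helper:

	/* autoneg stays enabled only if the Autoneg bit survived validation */
	pl->link_config.an_enabled =
		linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, pl->supported);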
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 58a69f830d29..f78ceba42e57 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -232,7 +232,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
struct cstate *cs = lcs->next;
unsigned long deltaS, deltaA;
short changes = 0;
- int hlen;
+ int nlen, hlen;
unsigned char new_seq[16];
unsigned char *cp = new_seq;
struct iphdr *ip;
@@ -248,6 +248,8 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
return isize;
ip = (struct iphdr *) icp;
+ if (ip->version != 4 || ip->ihl < 5)
+ return isize;
/* Bail if this packet isn't TCP, or is an IP fragment */
if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
@@ -258,10 +260,14 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
comp->sls_o_tcp++;
return isize;
}
- /* Extract TCP header */
+ nlen = ip->ihl * 4;
+ if (isize < nlen + sizeof(*th))
+ return isize;
- th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4);
- hlen = ip->ihl*4 + th->doff*4;
+ th = (struct tcphdr *)(icp + nlen);
+ if (th->doff < sizeof(struct tcphdr) / 4)
+ return isize;
+ hlen = nlen + th->doff * 4;
/* Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
* some other control bit is set). Also uncompressible if
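
Taken together, the added checks form a complete sanity gate over the IPv4 and TCP headers before the compressor dereferences them. Pulled out of the surrounding function as a standalone sketch — icp/isize are the raw packet and its length as in slhc_compress(), and the real code returns the packet uncompressed rather than a bool:

#include <linux/ip.h>
#include <linux/tcp.h>

static bool slhc_headers_sane(const unsigned char *icp, int isize)
{
	const struct iphdr *ip = (const struct iphdr *)icp;
	const struct tcphdr *th;
	int nlen;

	if (ip->version != 4 || ip->ihl < 5)	/* not IPv4 / bogus IHL */
		return false;
	nlen = ip->ihl * 4;
	if (isize < nlen + (int)sizeof(*th))	/* no room for a TCP header */
		return false;
	th = (const struct tcphdr *)(icp + nlen);
	/* a data offset below 5 words would place payload inside the header */
	return th->doff >= sizeof(struct tcphdr) / 4;
}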
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index babb01888b78..f81fb0b13a94 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -456,11 +456,8 @@ static void slip_write_wakeup(struct tty_struct *tty)
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
- if (!sl)
- goto out;
-
- schedule_work(&sl->tx_work);
-out:
+ if (sl)
+ schedule_work(&sl->tx_work);
rcu_read_unlock();
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index ca70a1d840eb..4004f98e50d9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2240,6 +2240,8 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
[TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
[TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
[TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
+ [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 },
+ [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 },
};
static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 79f248cb282d..228fe449dc6d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -75,35 +75,6 @@
static void tun_default_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd);
-/* Uncomment to enable debugging */
-/* #define TUN_DEBUG 1 */
-
-#ifdef TUN_DEBUG
-static int debug;
-
-#define tun_debug(level, tun, fmt, args...) \
-do { \
- if (tun->debug) \
- netdev_printk(level, tun->dev, fmt, ##args); \
-} while (0)
-#define DBG1(level, fmt, args...) \
-do { \
- if (debug == 2) \
- printk(level fmt, ##args); \
-} while (0)
-#else
-#define tun_debug(level, tun, fmt, args...) \
-do { \
- if (0) \
- netdev_printk(level, tun->dev, fmt, ##args); \
-} while (0)
-#define DBG1(level, fmt, args...) \
-do { \
- if (0) \
- printk(level fmt, ##args); \
-} while (0)
-#endif
-
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
/* TUN device flags */
@@ -225,9 +196,7 @@ struct tun_struct {
struct sock_fprog fprog;
/* protected by rtnl lock */
bool filter_attached;
-#ifdef TUN_DEBUG
- int debug;
-#endif
+ u32 msg_enable;
spinlock_t lock;
struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
struct timer_list flow_gc_timer;
@@ -423,8 +392,9 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
if (e) {
- tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
- rxhash, queue_index);
+ netif_info(tun, tx_queued, tun->dev,
+ "create flow: hash %u index %u\n",
+ rxhash, queue_index);
e->updated = jiffies;
e->rxhash = rxhash;
e->rps_rxhash = 0;
@@ -438,8 +408,8 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
- tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
- e->rxhash, e->queue_index);
+ netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
+ e->rxhash, e->queue_index);
hlist_del_rcu(&e->hash_link);
kfree_rcu(e, rcu);
--tun->flow_count;
@@ -485,8 +455,6 @@ static void tun_flow_cleanup(struct timer_list *t)
unsigned long count = 0;
int i;
- tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
-
spin_lock(&tun->lock);
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
struct tun_flow_entry *e;
@@ -546,8 +514,7 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
rcu_read_unlock();
}
-/**
- * Save the hash received in the stack receive path and update the
+/* Save the hash received in the stack receive path and update the
* flow_hash table accordingly.
*/
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
@@ -1076,7 +1043,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (!rcu_dereference(tun->steering_prog))
tun_automq_xmit(tun, skb);
- tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
+ netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
/* Drop if the filter does not like it.
* This is a noop if the filter is disabled.
@@ -1433,8 +1400,6 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
sk = tfile->socket.sk;
- tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
-
poll_wait(file, sk_sleep(sk), wait);
if (!ptr_ring_empty(&tfile->tx_ring))
@@ -2205,8 +2170,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
ssize_t ret;
int err;
- tun_debug(KERN_INFO, tun, "tun_do_read\n");
-
if (!iov_iter_count(to)) {
tun_ptr_free(ptr);
return 0;
@@ -2851,8 +2814,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
netif_carrier_on(tun->dev);
- tun_debug(KERN_INFO, tun, "tun_set_iff\n");
-
/* Make sure persistent devices do not get stuck in
* xoff state.
*/
@@ -2883,8 +2844,6 @@ err_free_dev:
static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
- tun_debug(KERN_INFO, tun, "tun_get_iff\n");
-
strcpy(ifr->ifr_name, tun->dev->name);
ifr->ifr_flags = tun_flags(tun);
@@ -3108,7 +3067,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if (!tun)
goto unlock;
- tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
+ netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
net = dev_net(tun->dev);
ret = 0;
@@ -3129,8 +3088,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
/* Disable/Enable checksum */
/* [unimplemented] */
- tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
- arg ? "disabled" : "enabled");
+ netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
+ arg ? "disabled" : "enabled");
break;
case TUNSETPERSIST:
@@ -3148,8 +3107,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
do_notify = true;
}
- tun_debug(KERN_INFO, tun, "persist %s\n",
- arg ? "enabled" : "disabled");
+ netif_info(tun, drv, tun->dev, "persist %s\n",
+ arg ? "enabled" : "disabled");
break;
case TUNSETOWNER:
@@ -3161,8 +3120,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
}
tun->owner = owner;
do_notify = true;
- tun_debug(KERN_INFO, tun, "owner set to %u\n",
- from_kuid(&init_user_ns, tun->owner));
+ netif_info(tun, drv, tun->dev, "owner set to %u\n",
+ from_kuid(&init_user_ns, tun->owner));
break;
case TUNSETGROUP:
@@ -3174,29 +3133,28 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
}
tun->group = group;
do_notify = true;
- tun_debug(KERN_INFO, tun, "group set to %u\n",
- from_kgid(&init_user_ns, tun->group));
+ netif_info(tun, drv, tun->dev, "group set to %u\n",
+ from_kgid(&init_user_ns, tun->group));
break;
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
if (tun->dev->flags & IFF_UP) {
- tun_debug(KERN_INFO, tun,
- "Linktype set failed because interface is up\n");
+ netif_info(tun, drv, tun->dev,
+ "Linktype set failed because interface is up\n");
ret = -EBUSY;
} else {
tun->dev->type = (int) arg;
- tun_debug(KERN_INFO, tun, "linktype set to %d\n",
- tun->dev->type);
+ netif_info(tun, drv, tun->dev, "linktype set to %d\n",
+ tun->dev->type);
ret = 0;
}
break;
-#ifdef TUN_DEBUG
case TUNSETDEBUG:
- tun->debug = arg;
+ tun->msg_enable = (u32)arg;
break;
-#endif
+
case TUNSETOFFLOAD:
ret = set_offload(tun, arg);
break;
@@ -3219,9 +3177,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case SIOCSIFHWADDR:
/* Set hw address */
- tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
- ifr.ifr_hwaddr.sa_data);
-
ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
break;
@@ -3416,8 +3371,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
struct net *net = current->nsproxy->net_ns;
struct tun_file *tfile;
- DBG1(KERN_INFO, "tunX: tun_chr_open\n");
-
tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
&tun_proto, 0);
if (!tfile)
@@ -3557,20 +3510,16 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
static u32 tun_get_msglevel(struct net_device *dev)
{
-#ifdef TUN_DEBUG
struct tun_struct *tun = netdev_priv(dev);
- return tun->debug;
-#else
- return -EOPNOTSUPP;
-#endif
+
+ return tun->msg_enable;
}
static void tun_set_msglevel(struct net_device *dev, u32 value)
{
-#ifdef TUN_DEBUG
struct tun_struct *tun = netdev_priv(dev);
- tun->debug = value;
-#endif
+
+ tun->msg_enable = value;
}
static int tun_get_coalesce(struct net_device *dev,
@@ -3597,6 +3546,7 @@ static int tun_set_coalesce(struct net_device *dev,
}
static const struct ethtool_ops tun_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_drvinfo = tun_get_drvinfo,
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
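
With the private debug flag replaced by a conventional msg_enable word, each converted call site gates on a NETIF_MSG_* bit, and userspace can flip those bits through the existing get/set_msglevel ethtool ops (ethtool -s tunX msglvl N) or the old TUNSETDEBUG ioctl, which now feeds the same word. A sketch of the gating, using the two bits the converted call sites rely on:

static void tun_log_example(struct tun_struct *tun)
{
	/* emitted only when NETIF_MSG_DRV is set in tun->msg_enable */
	netif_info(tun, drv, tun->dev, "ioctl-path message\n");

	/* emitted only when NETIF_MSG_TX_QUEUED is set */
	netif_info(tun, tx_queued, tun->dev, "flow bookkeeping message\n");
}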
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index c2c82e6391b4..5569077bd5b8 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -175,7 +175,11 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
u32 val, max, min;
/* clamp new_tx to sane values */
- min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+ if (ctx->is_ndp16)
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+ else
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
+
max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
/* some devices set dwNtbOutMaxSize too low for the above default */
@@ -307,10 +311,17 @@ static ssize_t ndp_to_end_store(struct device *d, struct device_attribute *attr
if (enable == (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
return len;
- if (enable && !ctx->delayed_ndp16) {
- ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
- if (!ctx->delayed_ndp16)
- return -ENOMEM;
+ if (enable) {
+ if (ctx->is_ndp16 && !ctx->delayed_ndp16) {
+ ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp16)
+ return -ENOMEM;
+ }
+ if (!ctx->is_ndp16 && !ctx->delayed_ndp32) {
+ ctx->delayed_ndp32 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp32)
+ return -ENOMEM;
+ }
}
/* flush pending data before changing flag */
@@ -512,6 +523,9 @@ static int cdc_ncm_init(struct usbnet *dev)
dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n");
}
+ /* use ndp16 by default */
+ ctx->is_ndp16 = 1;
+
/* set NTB format, if both formats are supported.
*
* "The host shall only send this command while the NCM Data
@@ -519,14 +533,27 @@ static int cdc_ncm_init(struct usbnet *dev)
*/
if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) &
USB_CDC_NCM_NTB32_SUPPORTED) {
- dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0);
- if (err < 0)
+ if (ctx->drvflags & CDC_NCM_FLAG_PREFER_NTB32) {
+ ctx->is_ndp16 = 0;
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 32-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB32_FORMAT,
+ iface_no, NULL, 0);
+ } else {
+ ctx->is_ndp16 = 1;
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
+ }
+ if (err < 0) {
+ ctx->is_ndp16 = 1;
dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n");
+ }
}
/* set initial device values */
@@ -549,7 +576,10 @@ static int cdc_ncm_init(struct usbnet *dev)
ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
/* set up maximum NDP size */
- ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
+ if (ctx->is_ndp16)
+ ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
+ else
+ ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp32) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe32);
/* initial coalescing timer interval */
ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
@@ -734,7 +764,10 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
ctx->tx_curr_skb = NULL;
}
- kfree(ctx->delayed_ndp16);
+ if (ctx->is_ndp16)
+ kfree(ctx->delayed_ndp16);
+ else
+ kfree(ctx->delayed_ndp32);
kfree(ctx);
}
@@ -772,10 +805,8 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
u8 *buf;
int len;
int temp;
- int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
- __le16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -879,32 +910,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
- /*
- * Some Huawei devices have been observed to come out of reset in NDP32 mode.
- * Let's check if this is the case, and set the device to NDP16 mode again if
- * needed.
- */
- if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
- err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, iface_no, &curr_ntb_format, 2);
- if (err < 0) {
- goto error2;
- }
-
- if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) {
- dev_info(&intf->dev, "resetting NTB format to 16-bit");
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0);
-
- if (err < 0)
- goto error2;
- }
- }
-
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
if (!dev->in || !dev->out || !dev->status) {
@@ -929,9 +934,15 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
/* Allocate the delayed NDP if needed. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
- ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
- if (!ctx->delayed_ndp16)
- goto error2;
+ if (ctx->is_ndp16) {
+ ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp16)
+ goto error2;
+ } else {
+ ctx->delayed_ndp32 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp32)
+ goto error2;
+ }
dev_info(&intf->dev, "NDP will be placed at end of frame for this device.");
}
@@ -1055,7 +1066,7 @@ static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remai
/* return a pointer to a valid struct usb_cdc_ncm_ndp16 of type sign, possibly
* allocating a new one within skb
*/
-static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
+static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
{
struct usb_cdc_ncm_ndp16 *ndp16 = NULL;
struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
@@ -1110,12 +1121,73 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
return ndp16;
}
+static struct usb_cdc_ncm_ndp32 *cdc_ncm_ndp32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
+{
+ struct usb_cdc_ncm_ndp32 *ndp32 = NULL;
+ struct usb_cdc_ncm_nth32 *nth32 = (void *)skb->data;
+ size_t ndpoffset = le32_to_cpu(nth32->dwNdpIndex);
+
+ /* If NDP should be moved to the end of the NCM package, we can't follow the
+ * NTH32 header as we would normally do. NDP isn't written to the SKB yet, and
+ * the dwNdpIndex field in the header is not yet consistent with reality; it is fixed up later.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ if (ctx->delayed_ndp32->dwSignature == sign)
+ return ctx->delayed_ndp32;
+
+ /* We can only push a single NDP to the end. Return
+ * NULL to send what we've already got and queue this
+ * skb for later.
+ */
+ else if (ctx->delayed_ndp32->dwSignature)
+ return NULL;
+ }
+
+ /* follow the chain of NDPs, looking for a match */
+ while (ndpoffset) {
+ ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb->data + ndpoffset);
+ if (ndp32->dwSignature == sign)
+ return ndp32;
+ ndpoffset = le32_to_cpu(ndp32->dwNextNdpIndex);
+ }
+
+ /* align new NDP */
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
+
+ /* verify that there is room for the NDP and the datagram (reserve) */
+ if ((ctx->tx_curr_size - skb->len - reserve) < ctx->max_ndp_size)
+ return NULL;
+
+ /* link to it */
+ if (ndp32)
+ ndp32->dwNextNdpIndex = cpu_to_le32(skb->len);
+ else
+ nth32->dwNdpIndex = cpu_to_le32(skb->len);
+
+ /* push a new empty NDP */
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ ndp32 = skb_put_zero(skb, ctx->max_ndp_size);
+ else
+ ndp32 = ctx->delayed_ndp32;
+
+ ndp32->dwSignature = sign;
+ ndp32->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp32) + sizeof(struct usb_cdc_ncm_dpe32));
+ return ndp32;
+}
+
struct sk_buff *
cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- struct usb_cdc_ncm_nth16 *nth16;
- struct usb_cdc_ncm_ndp16 *ndp16;
+ union {
+ struct usb_cdc_ncm_nth16 *nth16;
+ struct usb_cdc_ncm_nth32 *nth32;
+ } nth;
+ union {
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ } ndp;
struct sk_buff *skb_out;
u16 n = 0, index, ndplen;
u8 ready2send = 0;
@@ -1179,11 +1251,19 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
ctx->tx_low_mem_val--;
}
- /* fill out the initial 16-bit NTB header */
- nth16 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth16));
- nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
- nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
- nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
+ if (ctx->is_ndp16) {
+ /* fill out the initial 16-bit NTB header */
+ nth.nth16 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth16));
+ nth.nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+ nth.nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+ nth.nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
+ } else {
+ /* fill out the initial 32-bit NTB header */
+ nth.nth32 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth32));
+ nth.nth32->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH32_SIGN);
+ nth.nth32->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth32));
+ nth.nth32->wSequence = cpu_to_le16(ctx->tx_seq++);
+ }
/* count total number of frames in this NTB */
ctx->tx_curr_frame_num = 0;
@@ -1205,13 +1285,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* get the appropriate NDP for this skb */
- ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
+ if (ctx->is_ndp16)
+ ndp.ndp16 = cdc_ncm_ndp16(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
+ else
+ ndp.ndp32 = cdc_ncm_ndp32(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
/* align beginning of next frame */
cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_curr_size);
/* check if we had enough room left for both NDP and frame */
- if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_curr_size) {
+ if ((ctx->is_ndp16 && !ndp.ndp16) || (!ctx->is_ndp16 && !ndp.ndp32) ||
+ skb_out->len + skb->len + delayed_ndp_size > ctx->tx_curr_size) {
if (n == 0) {
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
@@ -1233,13 +1317,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* calculate frame number within this NDP */
- ndplen = le16_to_cpu(ndp16->wLength);
- index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
+ if (ctx->is_ndp16) {
+ ndplen = le16_to_cpu(ndp.ndp16->wLength);
+ index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
+
+ /* OK, add this skb */
+ ndp.ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
+ ndp.ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
+ ndp.ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
+ } else {
+ ndplen = le16_to_cpu(ndp.ndp32->wLength);
+ index = (ndplen - sizeof(struct usb_cdc_ncm_ndp32)) / sizeof(struct usb_cdc_ncm_dpe32) - 1;
- /* OK, add this skb */
- ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
- ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
- ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
+ ndp.ndp32->dpe32[index].dwDatagramLength = cpu_to_le32(skb->len);
+ ndp.ndp32->dpe32[index].dwDatagramIndex = cpu_to_le32(skb_out->len);
+ ndp.ndp32->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe32));
+ }
skb_put_data(skb_out, skb->data, skb->len);
ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
dev_kfree_skb_any(skb);
@@ -1286,13 +1379,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* If requested, put NDP at end of frame. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
- nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
- cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
- nth16->wNdpIndex = cpu_to_le16(skb_out->len);
- skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+ if (ctx->is_ndp16) {
+ nth.nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ nth.nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+ skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+
+ /* Zero out delayed NDP - signature checking will naturally fail. */
+ ndp.ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+ } else {
+ nth.nth32 = (struct usb_cdc_ncm_nth32 *)skb_out->data;
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ nth.nth32->dwNdpIndex = cpu_to_le32(skb_out->len);
+ skb_put_data(skb_out, ctx->delayed_ndp32, ctx->max_ndp_size);
- /* Zero out delayed NDP - signature checking will naturally fail. */
- ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+ ndp.ndp32 = memset(ctx->delayed_ndp32, 0, ctx->max_ndp_size);
+ }
}
/* If collected data size is less or equal ctx->min_tx_pkt
@@ -1314,8 +1416,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* set final frame length */
- nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
- nth16->wBlockLength = cpu_to_le16(skb_out->len);
+ if (ctx->is_ndp16) {
+ nth.nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ nth.nth16->wBlockLength = cpu_to_le16(skb_out->len);
+ } else {
+ nth.nth32 = (struct usb_cdc_ncm_nth32 *)skb_out->data;
+ nth.nth32->dwBlockLength = cpu_to_le32(skb_out->len);
+ }
/* return skb */
ctx->tx_curr_skb = NULL;
@@ -1398,7 +1505,12 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
goto error;
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+
+ if (ctx->is_ndp16)
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+ else
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP32_NOCRC_SIGN));
+
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -1459,6 +1571,54 @@ error:
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
+int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
+{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
+ struct usb_cdc_ncm_nth32 *nth32;
+ int len;
+ int ret = -EINVAL;
+
+ if (ctx == NULL)
+ goto error;
+
+ if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth32) +
+ sizeof(struct usb_cdc_ncm_ndp32))) {
+ netif_dbg(dev, rx_err, dev->net, "frame too short\n");
+ goto error;
+ }
+
+ nth32 = (struct usb_cdc_ncm_nth32 *)skb_in->data;
+
+ if (nth32->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH32_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid NTH32 signature <%#010x>\n",
+ le32_to_cpu(nth32->dwSignature));
+ goto error;
+ }
+
+ len = le32_to_cpu(nth32->dwBlockLength);
+ if (len > ctx->rx_max) {
+ netif_dbg(dev, rx_err, dev->net,
+ "unsupported NTB block length %u/%u\n", len,
+ ctx->rx_max);
+ goto error;
+ }
+
+ if ((ctx->rx_seq + 1) != le16_to_cpu(nth32->wSequence) &&
+ (ctx->rx_seq || le16_to_cpu(nth32->wSequence)) &&
+ !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth32->wSequence))) {
+ netif_dbg(dev, rx_err, dev->net,
+ "sequence number glitch prev=%d curr=%d\n",
+ ctx->rx_seq, le16_to_cpu(nth32->wSequence));
+ }
+ ctx->rx_seq = le16_to_cpu(nth32->wSequence);
+
+ ret = le32_to_cpu(nth32->dwNdpIndex);
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth32);
+
/* verify NDP header and return number of datagrams, or negative error */
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
{
@@ -1495,6 +1655,42 @@ error:
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
+/* verify NDP header and return number of datagrams, or negative error */
+int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset)
+{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ int ret = -EINVAL;
+
+ if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp32)) > skb_in->len) {
+ netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
+ ndpoffset);
+ goto error;
+ }
+ ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb_in->data + ndpoffset);
+
+ if (le16_to_cpu(ndp32->wLength) < USB_CDC_NCM_NDP32_LENGTH_MIN) {
+ netif_dbg(dev, rx_err, dev->net, "invalid DPT32 length <%u>\n",
+ le16_to_cpu(ndp32->wLength));
+ goto error;
+ }
+
+ ret = ((le16_to_cpu(ndp32->wLength) -
+ sizeof(struct usb_cdc_ncm_ndp32)) /
+ sizeof(struct usb_cdc_ncm_dpe32));
+ ret--; /* we process NDP entries except for the last one */
+
+ if ((sizeof(struct usb_cdc_ncm_ndp32) +
+ ret * (sizeof(struct usb_cdc_ncm_dpe32))) > skb_in->len) {
+ netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
+ ret = -EINVAL;
+ }
+
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp32);
+
int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
@@ -1503,34 +1699,66 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
int nframes;
int x;
int offset;
- struct usb_cdc_ncm_ndp16 *ndp16;
- struct usb_cdc_ncm_dpe16 *dpe16;
+ union {
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ } ndp;
+ union {
+ struct usb_cdc_ncm_dpe16 *dpe16;
+ struct usb_cdc_ncm_dpe32 *dpe32;
+ } dpe;
+
int ndpoffset;
int loopcount = 50; /* arbitrary max preventing infinite loop */
u32 payload = 0;
- ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ if (ctx->is_ndp16)
+ ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ else
+ ndpoffset = cdc_ncm_rx_verify_nth32(ctx, skb_in);
+
if (ndpoffset < 0)
goto error;
next_ndp:
- nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
- if (nframes < 0)
- goto error;
+ if (ctx->is_ndp16) {
+ nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
- ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
+ ndp.ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
- if (ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
- netif_dbg(dev, rx_err, dev->net,
- "invalid DPT16 signature <%#010x>\n",
- le32_to_cpu(ndp16->dwSignature));
- goto err_ndp;
+ if (ndp.ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid DPT16 signature <%#010x>\n",
+ le32_to_cpu(ndp.ndp16->dwSignature));
+ goto err_ndp;
+ }
+ dpe.dpe16 = ndp.ndp16->dpe16;
+ } else {
+ nframes = cdc_ncm_rx_verify_ndp32(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
+
+ ndp.ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb_in->data + ndpoffset);
+
+ if (ndp.ndp32->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP32_NOCRC_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid DPT32 signature <%#010x>\n",
+ le32_to_cpu(ndp.ndp32->dwSignature));
+ goto err_ndp;
+ }
+ dpe.dpe32 = ndp.ndp32->dpe32;
}
- dpe16 = ndp16->dpe16;
- for (x = 0; x < nframes; x++, dpe16++) {
- offset = le16_to_cpu(dpe16->wDatagramIndex);
- len = le16_to_cpu(dpe16->wDatagramLength);
+ for (x = 0; x < nframes; x++) {
+ if (ctx->is_ndp16) {
+ offset = le16_to_cpu(dpe.dpe16->wDatagramIndex);
+ len = le16_to_cpu(dpe.dpe16->wDatagramLength);
+ } else {
+ offset = le32_to_cpu(dpe.dpe32->dwDatagramIndex);
+ len = le32_to_cpu(dpe.dpe32->dwDatagramLength);
+ }
/*
* CDC NCM ch. 3.7
@@ -1561,10 +1789,19 @@ next_ndp:
usbnet_skb_return(dev, skb);
payload += len; /* count payload bytes in this NTB */
}
+
+ if (ctx->is_ndp16)
+ dpe.dpe16++;
+ else
+ dpe.dpe32++;
}
err_ndp:
/* are there more NDPs to process? */
- ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+ if (ctx->is_ndp16)
+ ndpoffset = le16_to_cpu(ndp.ndp16->wNextNdpIndex);
+ else
+ ndpoffset = le32_to_cpu(ndp.ndp32->dwNextNdpIndex);
+
if (ndpoffset && loopcount--)
goto next_ndp;
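
The 16-bit and 32-bit NTB headers share the same shape; only the field widths and signatures differ, which is why the code can branch on ctx->is_ndp16 around otherwise identical logic. A minimal RX-side sketch locating the first NDP inside a received NTH32 frame, using the uapi structs — validation is abbreviated, see cdc_ncm_rx_verify_nth32()/cdc_ncm_rx_verify_ndp32() for the full checks:

#include <linux/usb/cdc.h>

static struct usb_cdc_ncm_ndp32 *first_ndp32(u8 *buf, unsigned int len)
{
	struct usb_cdc_ncm_nth32 *nth32 = (struct usb_cdc_ncm_nth32 *)buf;
	u32 ndpoffset;

	if (len < sizeof(*nth32) ||
	    nth32->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH32_SIGN))
		return NULL;

	ndpoffset = le32_to_cpu(nth32->dwNdpIndex);
	if (ndpoffset + sizeof(struct usb_cdc_ncm_ndp32) > len)
		return NULL;

	return (struct usb_cdc_ncm_ndp32 *)(buf + ndpoffset);
}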
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index e15a472c6a54..099d84827004 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -77,11 +77,11 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
*/
drvflags |= CDC_NCM_FLAG_NDP_TO_END;
- /* Additionally, it has been reported that some Huawei E3372H devices, with
- * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence
- * needing to be set to the NTB16 one again.
+ /* For many Huawei devices NTB32 is the default mode and the one they work
+ * best in; Huawei E5785 and E5885 devices refuse to work in NTB16 mode at all.
*/
- drvflags |= CDC_NCM_FLAG_RESET_NTB16;
+ drvflags |= CDC_NCM_FLAG_PREFER_NTB32;
+
ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 709578f4d060..8f8d9883d363 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -891,7 +891,7 @@ struct fw_block {
struct fw_header {
u8 checksum[32];
char version[RTL_VER_SIZE];
- struct fw_block blocks[0];
+ struct fw_block blocks[];
} __packed;
/**
@@ -930,7 +930,7 @@ struct fw_mac {
__le32 reserved;
__le16 fw_ver_reg;
u8 fw_ver_data;
- char info[0];
+ char info[];
} __packed;
/**
@@ -982,7 +982,7 @@ struct fw_phy_nc {
__le16 bp_start;
__le16 bp_num;
__le16 bp[4];
- char info[0];
+ char info[];
} __packed;
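
The blocks[0]/info[0] to blocks[]/info[] changes convert GCC zero-length arrays into C99 flexible array members, which lets the compiler and fortify checks flag misuse. The allocation size is then the header plus n trailing elements; in the kernel that is what struct_size() computes. A standalone userspace equivalent:

/* Hedged sketch: allocating a struct that ends in a flexible array
 * member, the userspace analogue of kmalloc(struct_size(...)). */
#include <stdlib.h>
#include <stddef.h>

struct blob {
	size_t len;
	unsigned char data[];	/* was "data[0]" in the old style */
};

static struct blob *blob_alloc(size_t len)
{
	struct blob *b = malloc(offsetof(struct blob, data) + len);

	if (b)
		b->len = len;
	return b;
}
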
enum rtl_fw_type {
@@ -3199,6 +3199,8 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
}
msleep(20);
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ break;
}
return data;
@@ -5380,7 +5382,10 @@ static void r8153_init(struct r8152 *tp)
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
+
msleep(20);
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ break;
}
data = r8153_phy_status(tp, 0);
@@ -5517,7 +5522,10 @@ static void r8153b_init(struct r8152 *tp)
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
+
msleep(20);
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ break;
}
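
Both hunks apply the same pattern: a bounded hardware poll that also bails out as soon as the device is flagged unplugged, so a surprise-removed adapter does not keep eating 20 ms sleeps and USB register reads. Schematically, with poll_done() and device_gone() as hypothetical stand-ins for the PLA_BOOT_CTRL read and the RTL8152_UNPLUG test_bit():

/* Hedged sketch of the poll-with-unplug-escape loop used above. */
for (int i = 0; i < 100; i++) {
	if (poll_done(tp))		/* e.g. AUTOLOAD_DONE set */
		break;

	msleep(20);
	if (device_gone(tp))		/* e.g. RTL8152_UNPLUG flag */
		break;			/* stop touching absent hardware */
}
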
data = r8153_phy_status(tp, 0);
@@ -6345,6 +6353,7 @@ static int rtl8152_set_ringparam(struct net_device *netdev,
}
static const struct ethtool_ops ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = rtl8152_get_drvinfo,
.get_link = ethtool_op_get_link,
.nway_reset = rtl8152_nway_reset,
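
The one-line .supported_coalesce_params additions in this series (here, and in virtio_net, vmxnet3 and wil6210 below) plug into the ethtool core check added around the same time: the driver declares, as a bitmask, which struct ethtool_coalesce fields it honors, and the core rejects ETHTOOL_SCOALESCE requests that touch anything else. That is what lets vmxnet3 drop its long hand-rolled field-by-field validation further down. A sketch of the declaration, with hypothetical get/set callbacks:

/* Hedged sketch: let the ethtool core filter unsupported coalesce
 * fields. ETHTOOL_COALESCE_USECS covers rx/tx_coalesce_usecs;
 * ETHTOOL_COALESCE_MAX_FRAMES covers rx/tx_max_coalesced_frames. */
static const struct ethtool_ops example_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_coalesce	= example_get_coalesce,
	.set_coalesce	= example_set_coalesce,	/* sees only allowed fields */
};
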
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8cdc4415fa70..d4cbb9e8c63f 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -328,7 +328,7 @@ static void veth_get_stats64(struct net_device *dev,
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (peer) {
- tot->rx_dropped += veth_stats_tx(peer, &packets, &bytes);
+ veth_stats_tx(peer, &packets, &bytes);
tot->rx_bytes += bytes;
tot->rx_packets += packets;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2fe7a3188282..11f722460513 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -371,7 +371,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct receive_queue *rq,
struct page *page, unsigned int offset,
unsigned int len, unsigned int truesize,
- bool hdr_valid)
+ bool hdr_valid, unsigned int metasize)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -393,6 +393,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
+ /* hdr_valid means no XDP, so we can copy the vnet header */
if (hdr_valid)
memcpy(hdr, p, hdr_len);
@@ -405,6 +406,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
copy = skb_tailroom(skb);
skb_put_data(skb, p, copy);
+ if (metasize) {
+ __skb_pull(skb, metasize);
+ skb_metadata_set(skb, metasize);
+ }
+
len -= copy;
offset += copy;
@@ -450,10 +456,6 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct virtio_net_hdr_mrg_rxbuf *hdr;
int err;
- /* virtqueue want to use data area in-front of packet */
- if (unlikely(xdpf->metasize > 0))
- return -EOPNOTSUPP;
-
if (unlikely(xdpf->headroom < vi->hdr_len))
return -EOVERFLOW;
@@ -644,6 +646,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
unsigned int delta = 0;
struct page *xdp_page;
int err;
+ unsigned int metasize = 0;
len -= vi->hdr_len;
stats->bytes += len;
@@ -683,8 +686,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
xdp.data = xdp.data_hard_start + xdp_headroom;
- xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -695,6 +698,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
/* Recalculate length in case bpf program changed it */
delta = orig_data - xdp.data;
len = xdp.data_end - xdp.data;
+ metasize = xdp.data - xdp.data_meta;
break;
case XDP_TX:
stats->xdp_tx++;
@@ -735,10 +739,13 @@ static struct sk_buff *receive_small(struct net_device *dev,
}
skb_reserve(skb, headroom - delta);
skb_put(skb, len);
- if (!delta) {
+ if (!xdp_prog) {
buf += header_offset;
memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
- } /* keep zeroed vnet hdr since packet was changed by bpf */
+ } /* keep zeroed vnet hdr since XDP is loaded */
+
+ if (metasize)
+ skb_metadata_set(skb, metasize);
err:
return skb;
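
With xdp.data_meta initialized to xdp.data instead of being marked invalid, an attached XDP program may now call bpf_xdp_adjust_meta() to reserve space in front of the packet; after the program runs, the driver measures metasize = xdp.data - xdp.data_meta and propagates it via skb_metadata_set(). A hedged sketch of the program side this enables (the 4-byte value is illustrative):

/* Minimal XDP program storing 4 bytes of metadata for later skb users.
 * Requires: clang -target bpf, libbpf headers. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_store_meta(struct xdp_md *ctx)
{
	void *data;
	__u32 *meta;

	/* fails on drivers that still mark data_meta invalid */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	data = (void *)(long)ctx->data;
	meta = (void *)(long)ctx->data_meta;
	if ((void *)(meta + 1) > data)	/* verifier bounds check */
		return XDP_PASS;

	*meta = 0xdeadbeef;	/* e.g. a flow mark read via skb metadata */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
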
@@ -760,8 +767,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct page *page = buf;
- struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
- PAGE_SIZE, true);
+ struct sk_buff *skb =
+ page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);
stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
@@ -793,6 +800,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
unsigned int truesize;
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
int err;
+ unsigned int metasize = 0;
head_skb = NULL;
stats->bytes += len - vi->hdr_len;
@@ -839,8 +847,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
data = page_address(xdp_page) + offset;
xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
xdp.data = data + vi->hdr_len;
- xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + (len - vi->hdr_len);
+ xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -848,24 +856,27 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
switch (act) {
case XDP_PASS:
+ metasize = xdp.data - xdp.data_meta;
+
/* recalculate offset to account for any header
- * adjustments. Note other cases do not build an
- * skb and avoid using offset
+ * adjustments, minus the metasize so the metadata is
+ * copied in page_to_skb(). Note that other cases do not
+ * build an skb and avoid using offset
*/
- offset = xdp.data -
- page_address(xdp_page) - vi->hdr_len;
+ offset = xdp.data - page_address(xdp_page) -
+ vi->hdr_len - metasize;
- /* recalculate len if xdp.data or xdp.data_end were
- * adjusted
+ /* recalculate len if xdp.data, xdp.data_end or
+ * xdp.data_meta were adjusted
*/
- len = xdp.data_end - xdp.data + vi->hdr_len;
+ len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
/* We can only create skb based on xdp_page. */
if (unlikely(xdp_page != page)) {
rcu_read_unlock();
put_page(page);
- head_skb = page_to_skb(vi, rq, xdp_page,
- offset, len,
- PAGE_SIZE, false);
+ head_skb = page_to_skb(vi, rq, xdp_page, offset,
+ len, PAGE_SIZE, false,
+ metasize);
return head_skb;
}
break;
@@ -921,7 +932,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_skb;
}
- head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
+ metasize);
curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -2166,48 +2178,13 @@ static void virtnet_get_channels(struct net_device *dev,
channels->other_count = 0;
}
-/* Check if the user is trying to change anything besides speed/duplex */
-static bool
-virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd)
-{
- struct ethtool_link_ksettings diff1 = *cmd;
- struct ethtool_link_ksettings diff2 = {};
-
- /* cmd is always set so we need to clear it, validate the port type
- * and also without autonegotiation we can ignore advertising
- */
- diff1.base.speed = 0;
- diff2.base.port = PORT_OTHER;
- ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
- diff1.base.duplex = 0;
- diff1.base.cmd = 0;
- diff1.base.link_mode_masks_nwords = 0;
-
- return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) &&
- bitmap_empty(diff1.link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS) &&
- bitmap_empty(diff1.link_modes.advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS) &&
- bitmap_empty(diff1.link_modes.lp_advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
static int virtnet_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct virtnet_info *vi = netdev_priv(dev);
- u32 speed;
-
- speed = cmd->base.speed;
- /* don't allow custom speed and duplex */
- if (!ethtool_validate_speed(speed) ||
- !ethtool_validate_duplex(cmd->base.duplex) ||
- !virtnet_validate_ethtool_cmd(cmd))
- return -EINVAL;
- vi->speed = speed;
- vi->duplex = cmd->base.duplex;
- return 0;
+ return ethtool_virtdev_set_link_ksettings(dev, cmd,
+ &vi->speed, &vi->duplex);
}
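
The deleted validation helper is replaced by one shared across virtual drivers; judging by the call site, it validates speed/duplex and caches them through the two out-pointers. A hedged reconstruction of the equivalent logic, mirroring the removed open-coded path (the real helper lives in the ethtool core):

/* Hedged sketch: accept only a valid speed/duplex pair and store it
 * in the driver's cached fields, as the removed code did by hand. */
static int virtdev_set_link_ksettings_like(const struct ethtool_link_ksettings *cmd,
					   u32 *dev_speed, u8 *dev_duplex)
{
	u32 speed = cmd->base.speed;

	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex))
		return -EINVAL;

	*dev_speed = speed;
	*dev_duplex = cmd->base.duplex;
	return 0;
}
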
static int virtnet_get_link_ksettings(struct net_device *dev,
@@ -2225,23 +2202,14 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
static int virtnet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
- struct ethtool_coalesce ec_default = {
- .cmd = ETHTOOL_SCOALESCE,
- .rx_max_coalesced_frames = 1,
- };
struct virtnet_info *vi = netdev_priv(dev);
int i, napi_weight;
- if (ec->tx_max_coalesced_frames > 1)
+ if (ec->tx_max_coalesced_frames > 1 ||
+ ec->rx_max_coalesced_frames != 1)
return -EINVAL;
- ec_default.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
-
- /* disallow changes to fields not explicitly tested above */
- if (memcmp(ec, &ec_default, sizeof(ec_default)))
- return -EINVAL;
-
if (napi_weight ^ vi->sq[0].napi.weight) {
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -2296,6 +2264,7 @@ static void virtnet_update_settings(struct virtnet_info *vi)
}
static const struct ethtool_ops virtnet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 1e4b9ba70983..6528940ce5f3 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -780,27 +780,6 @@ vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
if (!VMXNET3_VERSION_GE_3(adapter))
return -EOPNOTSUPP;
- if (ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_coalesce_usecs ||
- ec->tx_coalesce_usecs_irq ||
- ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval) {
- return -EINVAL;
- }
-
if ((ec->rx_coalesce_usecs == 0) &&
(ec->use_adaptive_rx_coalesce == 0) &&
(ec->tx_max_coalesced_frames == 0) &&
@@ -891,6 +870,9 @@ done:
}
static const struct ethtool_ops vmxnet3_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = vmxnet3_get_drvinfo,
.get_regs_len = vmxnet3_get_regs_len,
.get_regs = vmxnet3_get_regs,
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index fef10886ca4a..e481674485c2 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -95,6 +95,7 @@ out_bad:
}
static const struct ethtool_ops wil_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = cfg80211_get_drvinfo,
.get_coalesce = wil_ethtoolops_get_coalesce,
.set_coalesce = wil_ethtoolops_set_coalesce,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 282d0bc14e8e..a3a257089696 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -723,6 +723,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
return 0x200000;
case BRCM_CC_4359_CHIP_ID:
return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
+ case BRCM_CC_4364_CHIP_ID:
case CY_CC_4373_CHIP_ID:
return 0x160000;
default:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 5105f62767fb..39381cbde89e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -52,6 +52,7 @@ BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
+BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
@@ -70,6 +71,7 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
+ BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
@@ -2105,6 +2107,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index f9047db6a11d..3a08252f1a53 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1938,6 +1938,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
+ brcmf_sdio_rxfail(bus, true, true);
+ sdio_release_host(bus->sdiodev->func1);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
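
The two added lines matter on the error path: on a header-parse failure the loop previously skipped both the rx-failure bookkeeping and the sdio_release_host() that balances the claim taken earlier in the iteration, leaving the SDIO host claimed. Schematically, with read_and_parse() as a hypothetical stand-in:

/* Hedged sketch of the claim/release balance the fix restores. */
sdio_claim_host(bus->sdiodev->func1);
if (read_and_parse(bus) < 0) {
	brcmf_sdio_rxfail(bus, true, true);	/* account + request retune */
	sdio_release_host(bus->sdiodev->func1);	/* balance the claim */
	brcmu_pkt_buf_free_skb(pkt);
	continue;
}
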
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index d1037b6ef2d6..c6c4be05159d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -44,6 +44,7 @@
#define BRCM_CC_4358_CHIP_ID 0x4358
#define BRCM_CC_4359_CHIP_ID 0x4359
#define BRCM_CC_43602_CHIP_ID 43602
+#define BRCM_CC_4364_CHIP_ID 0x4364
#define BRCM_CC_4365_CHIP_ID 0x4365
#define BRCM_CC_4366_CHIP_ID 0x4366
#define BRCM_CC_43664_CHIP_ID 43664
@@ -74,6 +75,7 @@
#define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb
#define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc
#define BRCM_PCIE_43602_RAW_DEVICE_ID 43602
+#define BRCM_PCIE_4364_DEVICE_ID 0x4464
#define BRCM_PCIE_4365_DEVICE_ID 0x43ca
#define BRCM_PCIE_4365_2G_DEVICE_ID 0x43cb
#define BRCM_PCIE_4365_5G_DEVICE_ID 0x43cc
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 70b29bf16bb9..60296a754af2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -308,7 +308,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
}
/* PHY_SKU section is mandatory in B0 */
- if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT &&
+ !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
IWL_ERR(mvm,
"Can't parse phy_sku in B0, empty sections\n");
return NULL;
diff --git a/drivers/net/wireless/marvell/mwifiex/11ac.c b/drivers/net/wireless/marvell/mwifiex/11ac.c
index 59d23fb2365f..756f019ef28a 100644
--- a/drivers/net/wireless/marvell/mwifiex/11ac.c
+++ b/drivers/net/wireless/marvell/mwifiex/11ac.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11ac
+ * NXP Wireless LAN device driver: 802.11ac
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11ac.h b/drivers/net/wireless/marvell/mwifiex/11ac.h
index 1ca92c7a8a4a..29e83468cf3f 100644
--- a/drivers/net/wireless/marvell/mwifiex/11ac.h
+++ b/drivers/net/wireless/marvell/mwifiex/11ac.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11ac
+ * NXP Wireless LAN device driver: 802.11ac
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index 238accfe4f41..d2ee6469e67b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11h
+ * NXP Wireless LAN device driver: 802.11h
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index e435f801bc91..6696bce56178 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n
+ * NXP Wireless LAN device driver: 802.11n
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h
index 33268ce2cd82..83a88eecbda6 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n
+ * NXP Wireless LAN device driver: 802.11n
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index 088612438530..46f41dbcf30d 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n Aggregation
+ * NXP Wireless LAN device driver: 802.11n Aggregation
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
index 8279b159da7c..382c1265c441 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n Aggregation
+ * NXP Wireless LAN device driver: 802.11n Aggregation
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 05a3c61ac603..0bdafe9f66db 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
+ * NXP Wireless LAN device driver: 802.11n RX Re-ordering
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
index 22d991f514c8..465f244b3636 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
+ * NXP Wireless LAN device driver: 802.11n RX Re-ordering
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index d89684168500..0a6da6fe2f89 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: CFG80211
+ * NXP Wireless LAN device driver: CFG80211
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.h b/drivers/net/wireless/marvell/mwifiex/cfg80211.h
index 908367857d58..530a63f13f14 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: CFG80211
+ * NXP Wireless LAN device driver: CFG80211
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index f1522fb1c1e8..fb91ecfc5546 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: Channel, Frequence and Power
+ * NXP Wireless LAN device driver: Channel, Frequency and Power
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index e8788c35a453..7e4b8cd52605 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: commands and events
+ * NXP Wireless LAN device driver: commands and events
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 8ab114cf3467..dded92db1f37 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: debugfs
+ * NXP Wireless LAN device driver: debugfs
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h
index 46696ea0b23e..6bd23c9b1eed 100644
--- a/drivers/net/wireless/marvell/mwifiex/decl.h
+++ b/drivers/net/wireless/marvell/mwifiex/decl.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: generic data structures and APIs
+ * NXP Wireless LAN device driver: generic data structures and APIs
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/ethtool.c b/drivers/net/wireless/marvell/mwifiex/ethtool.c
index 58400c69ab26..9bdad3f59039 100644
--- a/drivers/net/wireless/marvell/mwifiex/ethtool.c
+++ b/drivers/net/wireless/marvell/mwifiex/ethtool.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: ethtool
+ * NXP Wireless LAN device driver: ethtool
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 1fb76d2f5d3f..4dfdf928f705 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: Firmware specific macros & structures
+ * NXP Wireless LAN device driver: Firmware specific macros & structures
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 580387f9f12a..811abe963af2 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -1,11 +1,11 @@
/*
- * Marvell Wireless LAN device driver: management IE handling- setting and
+ * NXP Wireless LAN device driver: management IE handling - setting and
* deleting IE.
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 1aa93e7e9835..82d69bc3aaaf 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: HW/FW Initialization
+ * NXP Wireless LAN device driver: HW/FW Initialization
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 0dd592ea6e83..3db449efa167 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: ioctl data structures & APIs
+ * NXP Wireless LAN device driver: ioctl data structures & APIs
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index d87aeff70cef..5934f7147547 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: association and ad-hoc start/join
+ * NXP Wireless LAN device driver: association and ad-hoc start/join
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 7d94695e7961..529099137644 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: major functions
+ * NXP Wireless LAN device driver: major functions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index fa5634af40f7..afaffc325452 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: major data structures and prototypes
+ * NXP Wireless LAN device driver: major data structures and prototypes
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index fc1706d0647d..87b4ccca4b9a 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: PCIE specific handling
+ * NXP Wireless LAN device driver: PCIE specific handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index f7ce9b6db6b4..fc59b522f670 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -3,10 +3,10 @@
* @brief This file contains definitions for PCI-E interface.
* driver.
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index a7968a84aaf8..ff932627a46c 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: scan ioctl and command handling
+ * NXP Wireless LAN device driver: scan ioctl and command handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index fec38b6e86ff..6a2dcb01caf4 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: SDIO specific handling
+ * NXP Wireless LAN device driver: SDIO specific handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index f672bdf52cc1..71cd8629b28e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: SDIO specific definitions
+ * NXP Wireless LAN device driver: SDIO specific definitions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 4ed10cf82f9a..0bd93f26bd7f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station command handling
+ * NXP Wireless LAN device driver: station command handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 20c206da0631..f21660149f58 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station command response handling
+ * NXP Wireless LAN device driver: station command response handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 5fdffb114913..bc79ca4cb803 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station event handling
+ * NXP Wireless LAN device driver: station event handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index fbfa0b15d0c8..653f9e094256 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: functions for station ioctl
+ * NXP Wireless LAN device driver: functions for station ioctl
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 52a2ce2e78b0..0d2adf887900 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station RX data handling
+ * NXP Wireless LAN device driver: station RX data handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 37c24b95e642..241305377e20 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station TX data handling
+ * NXP Wireless LAN device driver: station TX data handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index f8f282ce39bd..97bb87c3676b 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -1,9 +1,10 @@
-/* Marvell Wireless LAN device driver: TDLS handling
+/*
+ * NXP Wireless LAN device driver: TDLS handling
*
- * Copyright (C) 2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available on the worldwide web at
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index e3c1446dd847..a8479b879382 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: generic TX/RX data handling
+ * NXP Wireless LAN device driver: generic TX/RX data handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 0939a8c8f3ab..b48a85d791f6 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: AP specific command handling
+ * NXP Wireless LAN device driver: AP specific command handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index 86bfa1b9ef9d..9121447e2701 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: AP event handling
+ * NXP Wireless LAN device driver: AP event handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 354b09c5e8dc..77c8595f84f8 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: AP TX and RX data handling
+ * NXP Wireless LAN device driver: AP TX and RX data handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index c2365eeb7016..6f3cfde4654c 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: USB specific handling
+ * NXP Wireless LAN device driver: USB specific handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h
index 37abd228a84f..d822ec15b7e6 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.h
+++ b/drivers/net/wireless/marvell/mwifiex/usb.h
@@ -1,10 +1,10 @@
/*
* This file contains definitions for mwifiex USB interface driver.
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 3b0d31827681..de89a1e710b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: utility functions
+ * NXP Wireless LAN device driver: utility functions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/util.h b/drivers/net/wireless/marvell/mwifiex/util.h
index 7cafcecd7b85..44aa80eb7827 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.h
+++ b/drivers/net/wireless/marvell/mwifiex/util.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: utility functions
+ * NXP Wireless LAN device driver: utility functions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 132f9e8ed68c..a06fff199ea3 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: WMM
+ * NXP Wireless LAN device driver: WMM
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.h b/drivers/net/wireless/marvell/mwifiex/wmm.h
index 38f09762bd2f..04d7da95e307 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.h
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: WMM
+ * NXP Wireless LAN device driver: WMM
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 99bbc74acda8..d7a1ddc9e407 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
- tx.o agg-rx.o mcu.o airtime.o
+ tx.o agg-rx.o mcu.o
mt76-$(CONFIG_PCI) += pci.o
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 59c187898132..f77f03530259 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -4,7 +4,13 @@
*/
#include "mt76.h"
-#define REORDER_TIMEOUT (HZ / 10)
+static unsigned long mt76_aggr_tid_to_timeo(u8 tidno)
+{
+ /* Currently voice traffic (AC_VO) always runs without aggregation,
+ * so no special handling is needed. AC_BE/AC_BK use TIDs 0-3; just
+ * check for non-AC_BK/AC_BE TIDs and use a smaller timeout for them. */
+ return HZ / (tidno >= 4 ? 25 : 10);
+}
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
@@ -71,7 +77,8 @@ mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
nframes--;
status = (struct mt76_rx_status *)skb->cb;
if (!time_after(jiffies,
- status->reorder_time + REORDER_TIMEOUT))
+ status->reorder_time +
+ mt76_aggr_tid_to_timeo(tid->num)))
continue;
mt76_rx_aggr_release_frames(tid, frames, status->seqno);
@@ -101,7 +108,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
if (nframes)
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
- REORDER_TIMEOUT);
+ mt76_aggr_tid_to_timeo(tid->num));
mt76_rx_complete(dev, &frames, NULL);
rcu_read_unlock();
@@ -225,7 +232,7 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
mt76_rx_aggr_release_head(tid, frames);
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
- REORDER_TIMEOUT);
+ mt76_aggr_tid_to_timeo(tid->num));
out:
spin_unlock_bh(&tid->lock);
@@ -245,6 +252,7 @@ int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
tid->dev = dev;
tid->head = ssn;
tid->size = size;
+ tid->num = tidno;
INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
spin_lock_init(&tid->lock);
@@ -268,6 +276,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
if (!skb)
continue;
+ tid->reorder_buf[i] = NULL;
tid->nframes--;
dev_kfree_skb(skb);
}
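
[Editor's note] The agg-rx.c hunk above replaces the fixed REORDER_TIMEOUT with a per-TID lookup. As a rough userspace model (assuming a hypothetical HZ of 1000; the real value is the kernel's CONFIG_HZ), the mapping works out to a 100 ms reorder window for TIDs 0-3 and 40 ms for TIDs 4-7:

#include <stdio.h>

#define HZ 1000	/* assumption: CONFIG_HZ stand-in, varies per kernel build */

static unsigned long aggr_tid_to_timeo(int tidno)
{
	/* TIDs 0-3 (AC_BE/AC_BK) keep the old 100 ms reorder window;
	 * TIDs 4-7 (AC_VI, and AC_VO which normally runs unaggregated)
	 * get a 40 ms window to cut latency for time-sensitive traffic. */
	return HZ / (tidno >= 4 ? 25 : 10);
}

int main(void)
{
	for (int tid = 0; tid < 8; tid++)
		printf("tid %d -> %lu ms\n", tid,
		       aggr_tid_to_timeo(tid) * 1000 / HZ);
	return 0;
}
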
diff --git a/drivers/net/wireless/mediatek/mt76/airtime.c b/drivers/net/wireless/mediatek/mt76/airtime.c
deleted file mode 100644
index a4a785467748..000000000000
--- a/drivers/net/wireless/mediatek/mt76/airtime.c
+++ /dev/null
@@ -1,326 +0,0 @@
-// SPDX-License-Identifier: ISC
-/*
- * Copyright (C) 2019 Felix Fietkau <nbd@nbd.name>
- */
-
-#include "mt76.h"
-
-#define AVG_PKT_SIZE 1024
-
-/* Number of bits for an average sized packet */
-#define MCS_NBITS (AVG_PKT_SIZE << 3)
-
-/* Number of symbols for a packet with (bps) bits per symbol */
-#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
-
-/* Transmission time (1024 usec) for a packet containing (syms) * symbols */
-#define MCS_SYMBOL_TIME(sgi, syms) \
- (sgi ? \
- ((syms) * 18 * 1024 + 4 * 1024) / 5 : /* syms * 3.6 us */ \
- ((syms) * 1024) << 2 /* syms * 4 us */ \
- )
-
-/* Transmit duration for the raw data part of an average sized packet */
-#define MCS_DURATION(streams, sgi, bps) \
- MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
-
-#define BW_20 0
-#define BW_40 1
-#define BW_80 2
-
-/*
- * Define group sort order: HT40 -> SGI -> #streams
- */
-#define MT_MAX_STREAMS 4
-#define MT_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */
-#define MT_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */
-
-#define MT_HT_GROUPS_NB (MT_MAX_STREAMS * \
- MT_HT_STREAM_GROUPS)
-#define MT_VHT_GROUPS_NB (MT_MAX_STREAMS * \
- MT_VHT_STREAM_GROUPS)
-#define MT_GROUPS_NB (MT_HT_GROUPS_NB + \
- MT_VHT_GROUPS_NB)
-
-#define MT_HT_GROUP_0 0
-#define MT_VHT_GROUP_0 (MT_HT_GROUP_0 + MT_HT_GROUPS_NB)
-
-#define MCS_GROUP_RATES 10
-
-#define HT_GROUP_IDX(_streams, _sgi, _ht40) \
- MT_HT_GROUP_0 + \
- MT_MAX_STREAMS * 2 * _ht40 + \
- MT_MAX_STREAMS * _sgi + \
- _streams - 1
-
-#define _MAX(a, b) (((a)>(b))?(a):(b))
-
-#define GROUP_SHIFT(duration) \
- _MAX(0, 16 - __builtin_clz(duration))
-
-/* MCS rate information for an MCS group */
-#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \
- [HT_GROUP_IDX(_streams, _sgi, _ht40)] = { \
- .shift = _s, \
- .duration = { \
- MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s \
- } \
-}
-
-#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \
- GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26))
-
-#define MCS_GROUP(_streams, _sgi, _ht40) \
- __MCS_GROUP(_streams, _sgi, _ht40, \
- MCS_GROUP_SHIFT(_streams, _sgi, _ht40))
-
-#define VHT_GROUP_IDX(_streams, _sgi, _bw) \
- (MT_VHT_GROUP_0 + \
- MT_MAX_STREAMS * 2 * (_bw) + \
- MT_MAX_STREAMS * (_sgi) + \
- (_streams) - 1)
-
-#define BW2VBPS(_bw, r3, r2, r1) \
- (_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
-
-#define __VHT_GROUP(_streams, _sgi, _bw, _s) \
- [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
- .shift = _s, \
- .duration = { \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 117, 54, 26)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 234, 108, 52)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 351, 162, 78)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 468, 216, 104)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 702, 324, 156)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 936, 432, 208)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1053, 486, 234)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1170, 540, 260)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1404, 648, 312)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1560, 720, 346)) >> _s \
- } \
-}
-
-#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \
- GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 117, 54, 26)))
-
-#define VHT_GROUP(_streams, _sgi, _bw) \
- __VHT_GROUP(_streams, _sgi, _bw, \
- VHT_GROUP_SHIFT(_streams, _sgi, _bw))
-
-struct mcs_group {
- u8 shift;
- u16 duration[MCS_GROUP_RATES];
-};
-
-static const struct mcs_group airtime_mcs_groups[] = {
- MCS_GROUP(1, 0, BW_20),
- MCS_GROUP(2, 0, BW_20),
- MCS_GROUP(3, 0, BW_20),
- MCS_GROUP(4, 0, BW_20),
-
- MCS_GROUP(1, 1, BW_20),
- MCS_GROUP(2, 1, BW_20),
- MCS_GROUP(3, 1, BW_20),
- MCS_GROUP(4, 1, BW_20),
-
- MCS_GROUP(1, 0, BW_40),
- MCS_GROUP(2, 0, BW_40),
- MCS_GROUP(3, 0, BW_40),
- MCS_GROUP(4, 0, BW_40),
-
- MCS_GROUP(1, 1, BW_40),
- MCS_GROUP(2, 1, BW_40),
- MCS_GROUP(3, 1, BW_40),
- MCS_GROUP(4, 1, BW_40),
-
- VHT_GROUP(1, 0, BW_20),
- VHT_GROUP(2, 0, BW_20),
- VHT_GROUP(3, 0, BW_20),
- VHT_GROUP(4, 0, BW_20),
-
- VHT_GROUP(1, 1, BW_20),
- VHT_GROUP(2, 1, BW_20),
- VHT_GROUP(3, 1, BW_20),
- VHT_GROUP(4, 1, BW_20),
-
- VHT_GROUP(1, 0, BW_40),
- VHT_GROUP(2, 0, BW_40),
- VHT_GROUP(3, 0, BW_40),
- VHT_GROUP(4, 0, BW_40),
-
- VHT_GROUP(1, 1, BW_40),
- VHT_GROUP(2, 1, BW_40),
- VHT_GROUP(3, 1, BW_40),
- VHT_GROUP(4, 1, BW_40),
-
- VHT_GROUP(1, 0, BW_80),
- VHT_GROUP(2, 0, BW_80),
- VHT_GROUP(3, 0, BW_80),
- VHT_GROUP(4, 0, BW_80),
-
- VHT_GROUP(1, 1, BW_80),
- VHT_GROUP(2, 1, BW_80),
- VHT_GROUP(3, 1, BW_80),
- VHT_GROUP(4, 1, BW_80),
-};
-
-static u32
-mt76_calc_legacy_rate_duration(const struct ieee80211_rate *rate, bool short_pre,
- int len)
-{
- u32 duration;
-
- switch (rate->hw_value >> 8) {
- case MT_PHY_TYPE_CCK:
- duration = 144 + 48; /* preamble + PLCP */
- if (short_pre)
- duration >>= 1;
-
- duration += 10; /* SIFS */
- break;
- case MT_PHY_TYPE_OFDM:
- duration = 20 + 16; /* premable + SIFS */
- break;
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
-
- len <<= 3;
- duration += (len * 10) / rate->bitrate;
-
- return duration;
-}
-
-u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
- int len)
-{
- struct ieee80211_supported_band *sband;
- const struct ieee80211_rate *rate;
- bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI;
- bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
- int bw, streams;
- u32 duration;
- int group, idx;
-
- switch (status->bw) {
- case RATE_INFO_BW_20:
- bw = BW_20;
- break;
- case RATE_INFO_BW_40:
- bw = BW_40;
- break;
- case RATE_INFO_BW_80:
- bw = BW_80;
- break;
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
-
- switch (status->encoding) {
- case RX_ENC_LEGACY:
- if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
- return 0;
-
- sband = dev->hw->wiphy->bands[status->band];
- if (!sband || status->rate_idx >= sband->n_bitrates)
- return 0;
-
- rate = &sband->bitrates[status->rate_idx];
-
- return mt76_calc_legacy_rate_duration(rate, sp, len);
- case RX_ENC_VHT:
- streams = status->nss;
- idx = status->rate_idx;
- group = VHT_GROUP_IDX(streams, sgi, bw);
- break;
- case RX_ENC_HT:
- streams = ((status->rate_idx >> 3) & 3) + 1;
- idx = status->rate_idx & 7;
- group = HT_GROUP_IDX(streams, sgi, bw);
- break;
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
-
- if (WARN_ON_ONCE(streams > 4))
- return 0;
-
- duration = airtime_mcs_groups[group].duration[idx];
- duration <<= airtime_mcs_groups[group].shift;
- duration *= len;
- duration /= AVG_PKT_SIZE;
- duration /= 1024;
-
- duration += 36 + (streams << 2);
-
- return duration;
-}
-
-u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
- int len)
-{
- struct mt76_rx_status stat = {
- .band = info->band,
- };
- u32 duration = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
- struct ieee80211_tx_rate *rate = &info->status.rates[i];
- u32 cur_duration;
-
- if (rate->idx < 0 || !rate->count)
- break;
-
- if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_80;
- else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_40;
- else
- stat.bw = RATE_INFO_BW_20;
-
- stat.enc_flags = 0;
- if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- stat.enc_flags |= RX_ENC_FLAG_SHORTPRE;
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- stat.enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
- stat.rate_idx = rate->idx;
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- stat.encoding = RX_ENC_VHT;
- stat.rate_idx = ieee80211_rate_get_vht_mcs(rate);
- stat.nss = ieee80211_rate_get_vht_nss(rate);
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- stat.encoding = RX_ENC_HT;
- } else {
- stat.encoding = RX_ENC_LEGACY;
- }
-
- cur_duration = mt76_calc_rx_airtime(dev, &stat, len);
- duration += cur_duration * rate->count;
- }
-
- return duration;
-}
-EXPORT_SYMBOL_GPL(mt76_calc_tx_airtime);
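
[Editor's note] The driver-local airtime tables above are removed because the rx path now leans on mac80211's shared ieee80211_calc_rx_airtime() helper (see the mac80211.c hunk later in this patch). For reference, here is a minimal userspace model of the deleted legacy-rate path, with the preamble/PLCP constants copied from the removed mt76_calc_legacy_rate_duration() and the bitrate given in mac80211's 100 kbit/s units:

#include <stdio.h>

static unsigned int legacy_duration_us(int bitrate_100kbps, int short_pre,
				       int len_bytes, int is_cck)
{
	unsigned int duration;

	if (is_cck) {
		duration = 144 + 48;		/* preamble + PLCP */
		if (short_pre)
			duration >>= 1;
		duration += 10;			/* SIFS */
	} else {
		duration = 20 + 16;		/* preamble + SIFS */
	}

	/* payload: len * 8 bits at bitrate_100kbps * 100 kbit/s */
	duration += (len_bytes * 8 * 10) / bitrate_100kbps;
	return duration;
}

int main(void)
{
	/* a 1024-byte frame at 54 Mbit/s OFDM: ~187 us */
	printf("%u us\n", legacy_duration_us(540, 0, 1024, 0));
	return 0;
}
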
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 6173c80189ba..75e659774e07 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -132,6 +132,11 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
writel(q->ndesc, &q->regs->ring_size);
q->head = readl(&q->regs->dma_idx);
q->tail = q->head;
+}
+
+static void
+mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
writel(q->head, &q->regs->cpu_idx);
}
@@ -141,7 +146,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
struct mt76_sw_queue *sq = &dev->q_tx[qid];
struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry;
- unsigned int n_swq_queued[4] = {};
+ unsigned int n_swq_queued[8] = {};
unsigned int n_queued = 0;
bool wake = false;
int i, last;
@@ -178,15 +183,25 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
spin_lock_bh(&q->lock);
q->queued -= n_queued;
- for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
+ for (i = 0; i < 4; i++) {
if (!n_swq_queued[i])
continue;
dev->q_tx[i].swq_queued -= n_swq_queued[i];
}
- if (flush)
+ /* ext PHY */
+ for (i = 0; i < 4; i++) {
+ if (!n_swq_queued[4 + i])
+ continue;
+
+ dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i];
+ }
+
+ if (flush) {
mt76_dma_sync_idx(dev, q);
+ mt76_dma_kick_queue(dev, q);
+ }
wake = wake && q->stopped &&
qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
@@ -238,7 +253,9 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
if (!q->queued)
return NULL;
- if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+ if (flush)
+ q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
return NULL;
q->tail = (q->tail + 1) % q->ndesc;
@@ -247,12 +264,6 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
return mt76_dma_get_buf(dev, q, idx, len, info, more);
}
-static void
-mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
-{
- writel(q->head, &q->regs->cpu_idx);
-}
-
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
@@ -261,10 +272,13 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct mt76_queue_buf buf;
dma_addr_t addr;
+ if (q->queued + 1 >= q->ndesc - 1)
+ goto error;
+
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
- return -ENOMEM;
+ goto error;
buf.addr = addr;
buf.len = skb->len;
@@ -275,6 +289,10 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
spin_unlock_bh(&q->lock);
return 0;
+
+error:
+ dev_kfree_skb(skb);
+ return -ENOMEM;
}
static int
@@ -286,6 +304,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct mt76_tx_info tx_info = {
.skb = skb,
};
+ struct ieee80211_hw *hw;
int len, n = 0, ret = -ENOMEM;
struct mt76_queue_entry e;
struct mt76_txwi_cache *t;
@@ -295,7 +314,8 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
t = mt76_get_txwi(dev);
if (!t) {
- ieee80211_free_txskb(dev->hw, skb);
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_free_txskb(hw, skb);
return -ENOMEM;
}
txwi = mt76_get_txwi_ptr(dev, t);
@@ -427,7 +447,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
int i;
for (i = 0; i < q->ndesc; i++)
- q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
mt76_dma_rx_cleanup(dev, q);
mt76_dma_sync_idx(dev, q);
@@ -447,10 +467,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
struct page *page = virt_to_head_page(data);
int offset = data - page_address(page);
struct sk_buff *skb = q->rx_head;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
- offset += q->buf_offset;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
- q->buf_size);
+ if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
+ offset += q->buf_offset;
+ skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
+ q->buf_size);
+ }
if (more)
return;
@@ -528,6 +551,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
dev = container_of(napi->dev, struct mt76_dev, napi_dev);
qid = napi - dev->napi;
+ local_bh_disable();
rcu_read_lock();
do {
@@ -537,6 +561,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
} while (cur && done < budget);
rcu_read_unlock();
+ local_bh_enable();
if (done < budget && napi_complete(napi))
dev->drv->rx_poll_complete(dev, qid);
@@ -555,7 +580,6 @@ mt76_dma_init(struct mt76_dev *dev)
netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
64);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
- skb_queue_head_init(&dev->rx_skb[i]);
napi_enable(&dev->napi[i]);
}
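
[Editor's note] One detail worth calling out in the dma.c hunks: mt76_dma_tx_queue_skb_raw() now rejects an enqueue up front when the ring is nearly full, and frees the skb on that error path instead of leaking it. A minimal sketch of the same guard, with malloc()/free() standing in for skb allocation:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ring {
	int ndesc;	/* total descriptors */
	int queued;	/* descriptors in flight */
};

static bool ring_enqueue(struct ring *q, void *buf)
{
	/* same guard as the patch: leave headroom of one descriptor */
	if (q->queued + 1 >= q->ndesc - 1) {
		free(buf);	/* mirrors dev_kfree_skb() on the error path */
		return false;
	}
	q->queued++;
	return true;
}

int main(void)
{
	struct ring q = { .ndesc = 4, .queued = 0 };
	void *bufs[4];

	for (int i = 0; i < 4; i++) {
		bufs[i] = malloc(16);
		printf("enqueue %d: %s\n", i,
		       ring_enqueue(&q, bufs[i]) ? "ok" : "ring full, freed");
	}
	for (int i = 0; i < q.queued; i++)	/* accepted buffers */
		free(bufs[i]);
	return 0;
}
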
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 804224e81103..c236e303ccfd 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -64,6 +64,16 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
goto out_put_node;
}
+ if (of_property_read_bool(dev->dev->of_node, "big-endian")) {
+ u8 *data = (u8 *)dev->eeprom.data;
+ int i;
+
+ /* convert EEPROM data to little endian */
+ for (i = 0; i < round_down(len, 2); i += 2)
+ put_unaligned_le16(get_unaligned_be16(&data[i]),
+ &data[i]);
+ }
+
out_put_node:
of_node_put(np);
return ret;
@@ -77,13 +87,11 @@ mt76_eeprom_override(struct mt76_dev *dev)
{
#ifdef CONFIG_OF
struct device_node *np = dev->dev->of_node;
- const u8 *mac;
-
- if (!np)
- return;
+ const u8 *mac = NULL;
- mac = of_get_mac_address(np);
- if (!IS_ERR(mac))
+ if (np)
+ mac = of_get_mac_address(np);
+ if (!IS_ERR_OR_NULL(mac))
ether_addr_copy(dev->macaddr, mac);
#endif
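
[Editor's note] The new "big-endian" property handling above swaps each 16-bit EEPROM word into little-endian order in place. A standalone model of the same loop, with plain byte swaps standing in for the kernel's get_unaligned_be16()/put_unaligned_le16() pair:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void eeprom_be16_to_le16(uint8_t *data, size_t len)
{
	/* like round_down(len, 2) in the patch: skip a trailing odd byte */
	for (size_t i = 0; i + 1 < len; i += 2) {
		uint8_t tmp = data[i];

		data[i] = data[i + 1];
		data[i + 1] = tmp;
	}
}

int main(void)
{
	uint8_t eeprom[] = { 0x12, 0x34, 0xab, 0xcd, 0xff };

	eeprom_be16_to_le16(eeprom, sizeof(eeprom));
	for (size_t i = 0; i < sizeof(eeprom); i++)
		printf("%02x ", eeprom[i]);	/* 34 12 cd ab ff */
	printf("\n");
	return 0;
}
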
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 96018fd65779..f44f99184c10 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -121,7 +121,7 @@ static void mt76_init_stream_cap(struct mt76_dev *dev,
bool vht)
{
struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
- int i, nstream = hweight8(dev->antenna_mask);
+ int i, nstream = hweight8(dev->phy.antenna_mask);
struct ieee80211_sta_vht_cap *vht_cap;
u16 mcs_map = 0;
@@ -156,9 +156,9 @@ static void mt76_init_stream_cap(struct mt76_dev *dev,
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht)
{
if (dev->cap.has_2ghz)
- mt76_init_stream_cap(dev, &dev->sband_2g.sband, false);
+ mt76_init_stream_cap(dev, &dev->phy.sband_2g.sband, false);
if (dev->cap.has_5ghz)
- mt76_init_stream_cap(dev, &dev->sband_5g.sband, vht);
+ mt76_init_stream_cap(dev, &dev->phy.sband_5g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
@@ -187,8 +187,6 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
sband->n_channels = n_chan;
sband->bitrates = rates;
sband->n_bitrates = n_rates;
- dev->chandef.chan = &sband->channels[0];
- dev->chan_state = &msband->chan[0];
ht_cap = &sband->ht_cap;
ht_cap->ht_supported = true;
@@ -223,9 +221,9 @@ static int
mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
int n_rates)
{
- dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband;
+ dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->phy.sband_2g.sband;
- return mt76_init_sband(dev, &dev->sband_2g,
+ return mt76_init_sband(dev, &dev->phy.sband_2g,
mt76_channels_2ghz,
ARRAY_SIZE(mt76_channels_2ghz),
rates, n_rates, false);
@@ -235,18 +233,19 @@ static int
mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
int n_rates, bool vht)
{
- dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband;
+ dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->phy.sband_5g.sband;
- return mt76_init_sband(dev, &dev->sband_5g,
+ return mt76_init_sband(dev, &dev->phy.sband_5g,
mt76_channels_5ghz,
ARRAY_SIZE(mt76_channels_5ghz),
rates, n_rates, vht);
}
static void
-mt76_check_sband(struct mt76_dev *dev, int band)
+mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
+ enum nl80211_band band)
{
- struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
+ struct ieee80211_supported_band *sband = &msband->sband;
bool found = false;
int i;
@@ -261,20 +260,145 @@ mt76_check_sband(struct mt76_dev *dev, int band)
break;
}
- if (found)
+ if (found) {
+ phy->chandef.chan = &sband->channels[0];
+ phy->chan_state = &msband->chan[0];
return;
+ }
sband->n_channels = 0;
- dev->hw->wiphy->bands[band] = NULL;
+ phy->hw->wiphy->bands[band] = NULL;
}
+static void
+mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
+{
+ struct wiphy *wiphy = hw->wiphy;
+
+ SET_IEEE80211_DEV(hw, dev->dev);
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
+
+ wiphy->available_antennas_tx = dev->phy.antenna_mask;
+ wiphy->available_antennas_rx = dev->phy.antenna_mask;
+
+ hw->txq_data_size = sizeof(struct mt76_txq);
+
+ if (!hw->max_tx_fragments)
+ hw->max_tx_fragments = 16;
+
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+
+ wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
+}
+
+struct mt76_phy *
+mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
+ const struct ieee80211_ops *ops)
+{
+ struct ieee80211_hw *hw;
+ struct mt76_phy *phy;
+ unsigned int phy_size, chan_size;
+ unsigned int size_2g, size_5g;
+ void *priv;
+
+ phy_size = ALIGN(sizeof(*phy), 8);
+ chan_size = sizeof(dev->phy.sband_2g.chan[0]);
+ size_2g = ALIGN(ARRAY_SIZE(mt76_channels_2ghz) * chan_size, 8);
+ size_5g = ALIGN(ARRAY_SIZE(mt76_channels_5ghz) * chan_size, 8);
+
+ size += phy_size + size_2g + size_5g;
+ hw = ieee80211_alloc_hw(size, ops);
+ if (!hw)
+ return NULL;
+
+ phy = hw->priv;
+ phy->dev = dev;
+ phy->hw = hw;
+
+ mt76_phy_init(dev, hw);
+
+ priv = hw->priv + phy_size;
+
+ phy->sband_2g = dev->phy.sband_2g;
+ phy->sband_2g.chan = priv;
+ priv += size_2g;
+
+ phy->sband_5g = dev->phy.sband_5g;
+ phy->sband_5g.chan = priv;
+ priv += size_5g;
+
+ phy->priv = priv;
+
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
+
+ mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
+ mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(mt76_alloc_phy);
+
+int
+mt76_register_phy(struct mt76_phy *phy)
+{
+ int ret;
+
+ ret = ieee80211_register_hw(phy->hw);
+ if (ret)
+ return ret;
+
+ phy->dev->phy2 = phy;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_register_phy);
+
+void
+mt76_unregister_phy(struct mt76_phy *phy)
+{
+ struct mt76_dev *dev = phy->dev;
+
+ dev->phy2 = NULL;
+ mt76_tx_status_check(dev, NULL, true);
+ ieee80211_unregister_hw(phy->hw);
+}
+EXPORT_SYMBOL_GPL(mt76_unregister_phy);
+
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
const struct mt76_driver_ops *drv_ops)
{
struct ieee80211_hw *hw;
+ struct mt76_phy *phy;
struct mt76_dev *dev;
+ int i;
hw = ieee80211_alloc_hw(size, ops);
if (!hw)
@@ -285,6 +409,10 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
dev->dev = pdev;
dev->drv = drv_ops;
+ phy = &dev->phy;
+ phy->dev = dev;
+ phy->hw = hw;
+
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->cc_lock);
@@ -292,6 +420,15 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
init_waitqueue_head(&dev->tx_wait);
skb_queue_head_init(&dev->status_list);
+ skb_queue_head_init(&dev->mcu.res_q);
+ init_waitqueue_head(&dev->mcu.wait);
+ mutex_init(&dev->mcu.mutex);
+
+ INIT_LIST_HEAD(&dev->txwi_cache);
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
+ skb_queue_head_init(&dev->rx_skb[i]);
+
tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev);
return dev;
@@ -302,51 +439,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
struct ieee80211_rate *rates, int n_rates)
{
struct ieee80211_hw *hw = dev->hw;
- struct wiphy *wiphy = hw->wiphy;
+ struct mt76_phy *phy = &dev->phy;
int ret;
dev_set_drvdata(dev->dev, dev);
-
- INIT_LIST_HEAD(&dev->txwi_cache);
-
- SET_IEEE80211_DEV(hw, dev->dev);
- SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
-
- wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
-
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
-
- wiphy->available_antennas_tx = dev->antenna_mask;
- wiphy->available_antennas_rx = dev->antenna_mask;
-
- hw->txq_data_size = sizeof(struct mt76_txq);
- hw->max_tx_fragments = 16;
-
- ieee80211_hw_set(hw, SIGNAL_DBM);
- ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
- ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
- ieee80211_hw_set(hw, AMPDU_AGGREGATION);
- ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
- ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
- ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
- ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
- ieee80211_hw_set(hw, TX_AMSDU);
- ieee80211_hw_set(hw, TX_FRAG_LIST);
- ieee80211_hw_set(hw, MFP_CAPABLE);
- ieee80211_hw_set(hw, AP_LINK_PS);
- ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
- ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
-
- wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
+ mt76_phy_init(dev, hw);
if (dev->cap.has_2ghz) {
ret = mt76_init_sband_2g(dev, rates, n_rates);
@@ -360,9 +457,9 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
return ret;
}
- wiphy_read_of_freq_limits(dev->hw->wiphy);
- mt76_check_sband(dev, NL80211_BAND_2GHZ);
- mt76_check_sband(dev, NL80211_BAND_5GHZ);
+ wiphy_read_of_freq_limits(hw->wiphy);
+ mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
+ mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
ret = mt76_led_init(dev);
@@ -394,7 +491,10 @@ EXPORT_SYMBOL_GPL(mt76_free_device);
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
- if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
dev_kfree_skb(skb);
return;
}
@@ -403,13 +503,16 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76_rx);
-bool mt76_has_tx_pending(struct mt76_dev *dev)
+bool mt76_has_tx_pending(struct mt76_phy *phy)
{
+ struct mt76_dev *dev = phy->dev;
struct mt76_queue *q;
- int i;
+ int i, offset;
- for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- q = dev->q_tx[i].q;
+ offset = __MT_TXQ_MAX * (phy != &dev->phy);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++) {
+ q = dev->q_tx[offset + i].q;
if (q && q->queued)
return true;
}
@@ -419,37 +522,45 @@ bool mt76_has_tx_pending(struct mt76_dev *dev)
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
static struct mt76_channel_state *
-mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
+mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
{
struct mt76_sband *msband;
int idx;
if (c->band == NL80211_BAND_2GHZ)
- msband = &dev->sband_2g;
+ msband = &phy->sband_2g;
else
- msband = &dev->sband_5g;
+ msband = &phy->sband_5g;
idx = c - &msband->sband.channels[0];
return &msband->chan[idx];
}
+static void
+mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
+{
+ struct mt76_channel_state *state = phy->chan_state;
+
+ state->cc_active += ktime_to_us(ktime_sub(time,
+ phy->survey_time));
+ phy->survey_time = time;
+}
+
void mt76_update_survey(struct mt76_dev *dev)
{
- struct mt76_channel_state *state = dev->chan_state;
ktime_t cur_time;
- if (!test_bit(MT76_STATE_RUNNING, &dev->state))
- return;
-
if (dev->drv->update_survey)
dev->drv->update_survey(dev);
cur_time = ktime_get_boottime();
- state->cc_active += ktime_to_us(ktime_sub(cur_time,
- dev->survey_time));
- dev->survey_time = cur_time;
+ mt76_update_survey_active_time(&dev->phy, cur_time);
+ if (dev->phy2)
+ mt76_update_survey_active_time(dev->phy2, cur_time);
if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
+ struct mt76_channel_state *state = dev->phy.chan_state;
+
spin_lock_bh(&dev->cc_lock);
state->cc_bss_rx += dev->cur_cc_bss_rx;
dev->cur_cc_bss_rx = 0;
@@ -458,31 +569,33 @@ void mt76_update_survey(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
-void mt76_set_channel(struct mt76_dev *dev)
+void mt76_set_channel(struct mt76_phy *phy)
{
- struct ieee80211_hw *hw = dev->hw;
+ struct mt76_dev *dev = phy->dev;
+ struct ieee80211_hw *hw = phy->hw;
struct cfg80211_chan_def *chandef = &hw->conf.chandef;
bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
int timeout = HZ / 5;
- wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout);
+ wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(dev);
- dev->chandef = *chandef;
- dev->chan_state = mt76_channel_state(dev, chandef->chan);
+ phy->chandef = *chandef;
+ phy->chan_state = mt76_channel_state(phy, chandef->chan);
if (!offchannel)
- dev->main_chan = chandef->chan;
+ phy->main_chan = chandef->chan;
- if (chandef->chan != dev->main_chan)
- memset(dev->chan_state, 0, sizeof(*dev->chan_state));
+ if (chandef->chan != phy->main_chan)
+ memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
struct mt76_sband *sband;
struct ieee80211_channel *chan;
struct mt76_channel_state *state;
@@ -492,10 +605,10 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
if (idx == 0 && dev->drv->update_survey)
mt76_update_survey(dev);
- sband = &dev->sband_2g;
+ sband = &phy->sband_2g;
if (idx >= sband->sband.n_channels) {
idx -= sband->sband.n_channels;
- sband = &dev->sband_5g;
+ sband = &phy->sband_5g;
}
if (idx >= sband->sband.n_channels) {
@@ -504,13 +617,16 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
}
chan = &sband->sband.channels[idx];
- state = mt76_channel_state(dev, chan);
+ state = mt76_channel_state(phy, chan);
memset(survey, 0, sizeof(*survey));
survey->channel = chan;
survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
survey->filled |= dev->drv->survey_flags;
- if (chan == dev->main_chan) {
+ if (state->noise)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ if (chan == phy->main_chan) {
survey->filled |= SURVEY_INFO_IN_USE;
if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
@@ -520,6 +636,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
survey->time_busy = div_u64(state->cc_busy, 1000);
survey->time_rx = div_u64(state->cc_rx, 1000);
survey->time = div_u64(state->cc_active, 1000);
+ survey->noise = state->noise;
spin_lock_bh(&dev->cc_lock);
survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
@@ -555,8 +672,12 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
-static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
+static void
+mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
+ struct ieee80211_hw **hw,
+ struct ieee80211_sta **sta)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct mt76_rx_status mstat;
@@ -581,7 +702,8 @@ static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
memcpy(status->chain_signal, mstat.chain_signal,
sizeof(mstat.chain_signal));
- return wcid_to_sta(mstat.wcid);
+ *sta = wcid_to_sta(mstat.wcid);
+ *hw = mt76_phy_hw(dev, mstat.ext_phy);
}
static int
@@ -628,10 +750,18 @@ mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
int len)
{
struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_rx_status info = {
+ .enc_flags = status->enc_flags,
+ .rate_idx = status->rate_idx,
+ .encoding = status->encoding,
+ .band = status->band,
+ .nss = status->nss,
+ .bw = status->bw,
+ };
struct ieee80211_sta *sta;
u32 airtime;
- airtime = mt76_calc_rx_airtime(dev, status, len);
+ airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
spin_lock(&dev->cc_lock);
dev->cur_cc_bss_rx += airtime;
spin_unlock(&dev->cc_lock);
@@ -707,12 +837,14 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_sta *sta;
+ struct ieee80211_hw *hw;
struct mt76_wcid *wcid = status->wcid;
bool ps;
int i;
+ hw = mt76_phy_hw(dev, status->ext_phy);
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
- sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
+ sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
if (sta)
wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
}
@@ -770,7 +902,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
if (!skb_queue_empty(&mtxq->retry_q))
- ieee80211_schedule_txq(dev->hw, sta->txq[i]);
+ ieee80211_schedule_txq(hw, sta->txq[i]);
}
}
@@ -778,6 +910,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
struct napi_struct *napi)
{
struct ieee80211_sta *sta;
+ struct ieee80211_hw *hw;
struct sk_buff *skb;
spin_lock(&dev->rx_lock);
@@ -787,8 +920,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
continue;
}
- sta = mt76_rx_convert(skb);
- ieee80211_rx_napi(dev->hw, sta, skb, napi);
+ mt76_rx_convert(dev, skb, &hw, &sta);
+ ieee80211_rx_napi(hw, sta, skb, napi);
}
spin_unlock(&dev->rx_lock);
}
@@ -812,7 +945,7 @@ EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
static int
mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta, bool ext_phy)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int ret;
@@ -837,6 +970,9 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
}
ewma_signal_init(&wcid->rssi);
+ if (ext_phy)
+ mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
+ wcid->ext_phy = ext_phy;
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
out:
@@ -851,9 +987,6 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int i, idx = wcid->idx;
- rcu_assign_pointer(dev->wcid[idx], NULL);
- synchronize_rcu();
-
for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
mt76_rx_aggr_stop(dev, wcid, i);
@@ -863,7 +996,8 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
mt76_tx_status_check(dev, wcid, true);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(dev, sta->txq[i]);
- mt76_wcid_free(dev->wcid_mask, idx);
+ mt76_wcid_mask_clear(dev->wcid_mask, idx);
+ mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
@@ -881,11 +1015,13 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
- return mt76_sta_add(dev, vif, sta);
+ return mt76_sta_add(dev, vif, sta, ext_phy);
if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
@@ -900,30 +1036,27 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
+void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
+
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm)
{
- struct mt76_dev *dev = hw->priv;
- int n_chains = hweight8(dev->antenna_mask);
+ struct mt76_phy *phy = hw->priv;
+ int n_chains = hweight8(phy->antenna_mask);
+ int delta = mt76_tx_power_nss_delta(n_chains);
- *dbm = DIV_ROUND_UP(dev->txpower_cur, 2);
-
- /* convert from per-chain power to combined
- * output power
- */
- switch (n_chains) {
- case 4:
- *dbm += 6;
- break;
- case 3:
- *dbm += 4;
- break;
- case 2:
- *dbm += 3;
- break;
- default:
- break;
- }
+ *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
return 0;
}
@@ -1005,11 +1138,11 @@ int mt76_get_rate(struct mt76_dev *dev,
int i, offset = 0, len = sband->n_bitrates;
if (cck) {
- if (sband == &dev->sband_5g.sband)
+ if (sband == &dev->phy.sband_5g.sband)
return 0;
idx &= ~BIT(2); /* short preamble */
- } else if (sband == &dev->sband_2g.sband) {
+ } else if (sband == &dev->phy.sband_2g.sband) {
offset = 4;
}
@@ -1025,27 +1158,28 @@ EXPORT_SYMBOL_GPL(mt76_get_rate);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
- set_bit(MT76_SCANNING, &dev->state);
+ set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
- clear_bit(MT76_SCANNING, &dev->state);
+ clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
mutex_lock(&dev->mutex);
- *tx_ant = dev->antenna_mask;
- *rx_ant = dev->antenna_mask;
+ *tx_ant = phy->antenna_mask;
+ *rx_ant = phy->antenna_mask;
mutex_unlock(&dev->mutex);
return 0;
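
[Editor's note] The mac80211.c changes introduce mt76_alloc_phy()/mt76_register_phy() so a driver can expose a second radio as its own ieee80211_hw. A hypothetical caller might look like the sketch below; the two mt76 calls are the ones added in this patch, while example_phy_priv and the ops pointer are placeholders, so this is illustrative rather than buildable on its own:

struct example_phy_priv {
	int dummy;	/* driver-private per-PHY state would live here */
};

static int example_register_ext_phy(struct mt76_dev *dev,
				    const struct ieee80211_ops *ops)
{
	struct mt76_phy *phy;
	int err;

	phy = mt76_alloc_phy(dev, sizeof(struct example_phy_priv), ops);
	if (!phy)
		return -ENOMEM;

	/* phy->priv points at the private area mt76_alloc_phy() placed
	 * after the shared mt76_phy and its copied channel tables */
	err = mt76_register_phy(phy);
	if (err)
		ieee80211_free_hw(phy->hw);

	return err;
}
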
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index 2a976688804d..633ad948c21d 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -24,7 +24,6 @@ mt76_mcu_msg_alloc(const void *data, int head_len,
}
EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc);
-/* mmio */
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires)
{
@@ -34,16 +33,17 @@ struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
return NULL;
timeout = expires - jiffies;
- wait_event_timeout(dev->mmio.mcu.wait,
- !skb_queue_empty(&dev->mmio.mcu.res_q),
+ wait_event_timeout(dev->mcu.wait,
+ (!skb_queue_empty(&dev->mcu.res_q) ||
+ test_bit(MT76_MCU_RESET, &dev->phy.state)),
timeout);
- return skb_dequeue(&dev->mmio.mcu.res_q);
+ return skb_dequeue(&dev->mcu.res_q);
}
EXPORT_SYMBOL_GPL(mt76_mcu_get_response);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb)
{
- skb_queue_tail(&dev->mmio.mcu.res_q, skb);
- wake_up(&dev->mmio.mcu.wait);
+ skb_queue_tail(&dev->mcu.res_q, skb);
+ wake_up(&dev->mcu.wait);
}
EXPORT_SYMBOL_GPL(mt76_mcu_rx_event);
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 1c974df1fe25..7ead6620bb8b 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -94,9 +94,6 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
dev->bus = &mt76_mmio_ops;
dev->mmio.regs = regs;
- skb_queue_head_init(&dev->mmio.mcu.res_q);
- init_waitqueue_head(&dev->mmio.mcu.wait);
spin_lock_init(&dev->mmio.irq_lock);
- mutex_init(&dev->mmio.mcu.mutex);
}
EXPORT_SYMBOL_GPL(mt76_mmio_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index fb077760347a..2e57e7c6bd29 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -22,6 +22,7 @@
#define MT_SKB_HEAD_LEN 128
struct mt76_dev;
+struct mt76_phy;
struct mt76_wcid;
struct mt76_reg_pair {
@@ -177,6 +178,9 @@ enum mt76_wcid_flags {
#define MT76_N_WCIDS 128
+/* stored in ieee80211_tx_info::hw_queue */
+#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
+
DECLARE_EWMA(signal, 10, 8);
#define MT_WCID_TX_INFO_RATE GENMASK(15, 0)
@@ -196,6 +200,7 @@ struct mt76_wcid {
u8 hw_key_idx;
u8 sta:1;
+ u8 ext_phy:1;
u8 rx_check_pn;
u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
@@ -237,6 +242,8 @@ struct mt76_rx_tid {
u8 size;
u8 nframes;
+ u8 num;
+
u8 started:1, stopped:1, timer_pending:1;
struct sk_buff *reorder_buf[];
@@ -267,6 +274,7 @@ enum {
MT76_STATE_MCU_RUNNING,
MT76_SCANNING,
MT76_RESET,
+ MT76_MCU_RESET,
MT76_REMOVED,
MT76_READING_STATS,
};
@@ -279,6 +287,7 @@ struct mt76_hw_cap {
#define MT_DRV_TXWI_NO_FREE BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS BIT(1)
#define MT_DRV_SW_RX_AIRTIME BIT(2)
+#define MT_DRV_RX_DMA_HDR BIT(3)
struct mt76_driver_ops {
u32 drv_flags;
@@ -321,6 +330,8 @@ struct mt76_channel_state {
u64 cc_rx;
u64 cc_bss_rx;
u64 cc_tx;
+
+ s8 noise;
};
struct mt76_sband {
@@ -350,12 +361,15 @@ struct mt76_rate_power {
enum mt_vendor_req {
MT_VEND_DEV_MODE = 0x1,
MT_VEND_WRITE = 0x2,
+ MT_VEND_POWER_ON = 0x4,
MT_VEND_MULTI_WRITE = 0x6,
MT_VEND_MULTI_READ = 0x7,
MT_VEND_READ_EEPROM = 0x9,
MT_VEND_WRITE_FCE = 0x42,
MT_VEND_WRITE_CFG = 0x46,
MT_VEND_READ_CFG = 0x47,
+ MT_VEND_READ_EXT = 0x63,
+ MT_VEND_WRITE_EXT = 0x66,
};
enum mt76u_in_ep {
@@ -374,20 +388,27 @@ enum mt76u_out_ep {
__MT_EP_OUT_MAX,
};
+struct mt76_mcu {
+ struct mutex mutex;
+ u32 msg_seq;
+
+ struct sk_buff_head res_q;
+ wait_queue_head_t wait;
+};
+
#define MT_TX_SG_MAX_SIZE 8
-#define MT_RX_SG_MAX_SIZE 1
+#define MT_RX_SG_MAX_SIZE 4
#define MT_NUM_TX_ENTRIES 256
#define MT_NUM_RX_ENTRIES 128
#define MCU_RESP_URB_SIZE 1024
struct mt76_usb {
struct mutex usb_ctrl_mtx;
- union {
- u8 data[32];
- __le32 reg_val;
- };
+ __le32 reg_val;
+ u8 *data;
+ u16 data_len;
struct tasklet_struct rx_tasklet;
- struct workqueue_struct *stat_wq;
+ struct workqueue_struct *wq;
struct work_struct stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
@@ -395,10 +416,7 @@ struct mt76_usb {
bool sg_en;
struct mt76u_mcu {
- struct mutex mutex;
u8 *data;
- u32 msg_seq;
-
/* multiple reads */
struct mt76_reg_pair *rp;
int rp_len;
@@ -408,14 +426,6 @@ struct mt76_usb {
};
struct mt76_mmio {
- struct mt76e_mcu {
- struct mutex mutex;
-
- wait_queue_head_t wait;
- struct sk_buff_head res_q;
-
- u32 msg_seq;
- } mcu;
void __iomem *regs;
spinlock_t irq_lock;
u32 irqmask;
@@ -433,6 +443,7 @@ struct mt76_rx_status {
u8 iv[6];
+ u8 ext_phy:1;
u8 aggr:1;
u8 tid;
u16 seqno;
@@ -449,12 +460,33 @@ struct mt76_rx_status {
s8 chain_signal[IEEE80211_MAX_CHAINS];
};
-struct mt76_dev {
+struct mt76_phy {
struct ieee80211_hw *hw;
+ struct mt76_dev *dev;
+ void *priv;
+
+ unsigned long state;
+
struct cfg80211_chan_def chandef;
struct ieee80211_channel *main_chan;
struct mt76_channel_state *chan_state;
+ ktime_t survey_time;
+
+ struct mt76_sband sband_2g;
+ struct mt76_sband sband_5g;
+
+ int txpower_cur;
+ u8 antenna_mask;
+};
+
+struct mt76_dev {
+ struct mt76_phy phy; /* must be first */
+
+ struct mt76_phy *phy2;
+
+ struct ieee80211_hw *hw;
+
spinlock_t lock;
spinlock_t cc_lock;
@@ -471,14 +503,15 @@ struct mt76_dev {
const struct mt76_mcu_ops *mcu_ops;
struct device *dev;
+ struct mt76_mcu mcu;
+
struct net_device napi_dev;
spinlock_t rx_lock;
struct napi_struct napi[__MT_RXQ_MAX];
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
- u32 ampdu_ref;
struct list_head txwi_cache;
- struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
+ struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
@@ -491,32 +524,25 @@ struct mt76_dev {
struct sk_buff_head status_list;
unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
+ unsigned long wcid_phy_mask[MT76_N_WCIDS / BITS_PER_LONG];
struct mt76_wcid global_wcid;
struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
u8 macaddr[ETH_ALEN];
u32 rev;
- unsigned long state;
u32 aggr_stats[32];
- u8 antenna_mask;
- u16 chainmask;
-
struct tasklet_struct pre_tbtt_tasklet;
int beacon_int;
u8 beacon_mask;
- struct mt76_sband sband_2g;
- struct mt76_sband sband_5g;
struct debugfs_blob_wrapper eeprom;
struct debugfs_blob_wrapper otp;
struct mt76_hw_cap cap;
struct mt76_rate_power rate_power;
- int txpower_conf;
- int txpower_cur;
enum nl80211_dfs_regions region;
@@ -529,8 +555,6 @@ struct mt76_dev {
u8 csa_complete;
- ktime_t survey_time;
-
u32 rxfilter;
union {
@@ -581,7 +605,17 @@ enum mt76_phy_type {
#define __mt76_rmw_field(_dev, _reg, _field, _val) \
__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
-#define mt76_hw(dev) (dev)->mt76.hw
+#define mt76_hw(dev) (dev)->mphy.hw
+
+static inline struct ieee80211_hw *
+mt76_wcid_hw(struct mt76_dev *dev, u8 wcid)
+{
+ if (wcid <= MT76_N_WCIDS &&
+ mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
+ return dev->phy2->hw;
+
+ return dev->phy.hw;
+}
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
int timeout);
@@ -624,6 +658,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
+void mt76_unregister_phy(struct mt76_phy *phy);
+
+struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
+ const struct ieee80211_ops *ops);
+int mt76_register_phy(struct mt76_phy *phy);
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
@@ -633,6 +672,20 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
+static inline struct mt76_phy *
+mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
+{
+ if (phy_ext && dev->phy2)
+ return dev->phy2;
+ return &dev->phy;
+}
+
+static inline struct ieee80211_hw *
+mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
+{
+ return mt76_dev_phy(dev, phy_ext)->hw;
+}
+
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
@@ -701,24 +754,31 @@ static inline bool mt76_is_skb_pktid(u8 pktid)
return pktid >= MT_PACKET_ID_FIRST;
}
+static inline u8 mt76_tx_power_nss_delta(u8 nss)
+{
+ static const u8 nss_delta[4] = { 0, 6, 9, 12 };
+
+ return nss_delta[nss - 1];
+}
+
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
-void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool send_bar);
-void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid);
-void mt76_txq_schedule_all(struct mt76_dev *dev);
+void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
+void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_tasklet(unsigned long data);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
enum ieee80211_frame_release_type reason,
bool more_data);
-bool mt76_has_tx_pending(struct mt76_dev *dev);
-void mt76_set_channel(struct mt76_dev *dev);
+bool mt76_has_tx_pending(struct mt76_phy *phy);
+void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
@@ -752,8 +812,10 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
-int mt76_get_min_avg_rssi(struct mt76_dev *dev);
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm);
@@ -771,10 +833,22 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
- int len);
/* internal */
+static inline struct ieee80211_hw *
+mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hw *hw = dev->phy.hw;
+
+ if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
+ hw = dev->phy2->hw;
+
+ info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;
+
+ return hw;
+}
+
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
@@ -783,8 +857,6 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
-u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
- int len);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
@@ -804,7 +876,7 @@ static inline u8 q2ep(u8 qid)
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
- int timeout)
+ int timeout, int ep)
{
struct usb_interface *uintf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(uintf);
@@ -812,20 +884,23 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
unsigned int pipe;
if (actual_len)
- pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
+ pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
else
- pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
+ pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);
return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
+int mt76u_skb_dma_info(struct sk_buff *skb, u32 info);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
-int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+ bool ext);
+int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
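
[Editor's note] Among the mt76.h additions, mt76_tx_power_nss_delta() replaces the old per-chain switch statement in mt76_get_txpower() with a lookup table in 0.5 dB units (6 -> 3 dB for two streams, 12 -> 6 dB for four). A quick userspace check of the resulting combined powers, assuming a 20 dBm per-chain setting:

#include <stdio.h>

static int tx_power_nss_delta(int nss)
{
	/* same table as the patch, in 0.5 dB units */
	static const int nss_delta[4] = { 0, 6, 9, 12 };

	return nss_delta[nss - 1];
}

int main(void)
{
	int txpower_half_db = 40;	/* 20 dBm per chain, 0.5 dB units */

	for (int nss = 1; nss <= 4; nss++) {
		/* DIV_ROUND_UP(x, 2), as in the new mt76_get_txpower() */
		int dbm = (txpower_half_db + tx_power_nss_delta(nss) + 1) / 2;

		printf("%d stream(s) -> %d dBm combined\n", nss, dbm);
	}
	return 0;
}
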
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
index e5af4f3389cc..60a996b63c0c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: ISC
#include "mt7603.h"
+#include "../trace.h"
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
@@ -17,9 +18,11 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
intr &= dev->mt76.mmio.irqmask;
if (intr & MT_INT_MAC_IRQ3) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index a6ab73060aad..a08b85281170 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -30,6 +30,16 @@ mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
+ static const u8 tid_to_ac[8] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+ };
__le32 *txd = (__le32 *)skb->data;
struct ieee80211_hdr *hdr;
struct ieee80211_sta *sta;
@@ -38,7 +48,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
void *priv;
int idx;
u32 val;
- u8 tid;
+ u8 tid = 0;
if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
goto free;
@@ -56,15 +66,16 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
priv = msta = container_of(wcid, struct mt7603_sta, wcid);
val = le32_to_cpu(txd[0]);
- skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
-
val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
txd[0] = cpu_to_le32(val);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
- tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_TAG1D_MASK;
+ skb_set_queue_mapping(skb, tid_to_ac[tid]);
ieee80211_sta_set_buffered(sta, tid, true);
spin_lock_bh(&dev->ps_lock);
@@ -210,7 +221,7 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret;
ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
- MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+ MT7603_MCU_RX_RING_SIZE, MT_RX_BUF_SIZE);
if (ret)
return ret;
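
[Editor's note] The mt7603/dma.c hunk fixes mt7603_rx_loopback_skb() to stop reading a QoS control field from non-QoS frames: the TID now defaults to 0 and the queue mapping goes through an explicit TID-to-access-category table. A compact model of that mapping (a local enum mirrors mac80211's IEEE80211_AC_* numbering, and 0x7 stands in for IEEE80211_QOS_CTL_TAG1D_MASK):

#include <stdbool.h>
#include <stdio.h>

/* numbering mirrors mac80211's IEEE80211_AC_*: VO=0, VI=1, BE=2, BK=3 */
enum { AC_VO, AC_VI, AC_BE, AC_BK };

static const int tid_to_ac[8] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};

static int frame_ac(bool is_qos, unsigned int qos_ctl)
{
	/* non-QoS frames have no QoS control field: default to TID 0 */
	unsigned int tid = is_qos ? (qos_ctl & 0x7) : 0;

	return tid_to_ac[tid];
}

int main(void)
{
	static const char * const names[] = { "VO", "VI", "BE", "BK" };

	printf("non-QoS    -> AC_%s\n", names[frame_ac(false, 0)]);
	printf("QoS, tid 6 -> AC_%s\n", names[frame_ac(true, 6)]);
	return 0;
}
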
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 0696dbf28c5b..9e40e81bcc29 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -113,7 +113,7 @@ mt7603_dma_sched_init(struct mt7603_dev *dev)
static void
mt7603_phy_init(struct mt7603_dev *dev)
{
- int rx_chains = dev->mt76.antenna_mask;
+ int rx_chains = dev->mphy.antenna_mask;
int tx_chains = hweight8(rx_chains) - 1;
mt76_rmw(dev, MT_WF_RMAC_RMCR,
@@ -284,7 +284,7 @@ mt7603_init_hardware(struct mt7603_dev *dev)
mt76_wr(dev, MT_WPDMA_GLO_CFG, 0x52000850);
mt7603_mac_dma_start(dev);
dev->rxfilter = mt76_rr(dev, MT_WF_RFCR);
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
for (i = 0; i < MT7603_WTBL_SIZE; i++) {
mt76_wr(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY | MT_PSE_RTA_WRITE |
@@ -363,9 +363,9 @@ static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on,
mt76);
u32 val, addr;
- val = MT_LED_STATUS_DURATION(0xffff) |
- MT_LED_STATUS_OFF(delay_off) |
- MT_LED_STATUS_ON(delay_on);
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
mt76_wr(dev, addr, val);
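The LED hunk above (and the matching regs.h change later in this patch) replaces hand-rolled shift-and-mask macros with plain GENMASK() field definitions filled in via FIELD_PREP(). A self-contained illustration of the equivalence, with the two kernel macros reimplemented for userspace:

#include <assert.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel's GENMASK()/FIELD_PREP(). */
#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) \
        (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define MT_LED_STATUS_ON        GENMASK(23, 16)

int main(void)
{
        /* 0x2a shifted into bits 23:16, exactly what the old
         * MT_LED_STATUS_ON(_v) macro open-coded. */
        assert(FIELD_PREP(MT_LED_STATUS_ON, 0x2a) == 0x002a0000u);
        return 0;
}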
@@ -493,12 +493,12 @@ mt7603_init_txpower(struct mt7603_dev *dev,
target_power += max_offset;
dev->tx_power_limit = target_power;
- dev->mt76.txpower_cur = target_power;
+ dev->mphy.txpower_cur = target_power;
target_power = DIV_ROUND_UP(target_power, 2);
/* add 3 dBm for 2SS devices (combined output) */
- if (dev->mt76.antenna_mask & BIT(1))
+ if (dev->mphy.antenna_mask & BIT(1))
target_power += 3;
for (i = 0; i < sband->n_channels; i++) {
@@ -535,9 +535,9 @@ int mt7603_register_device(struct mt7603_dev *dev)
(unsigned long)dev);
/* Check for 7688, which only has 1SS */
- dev->mt76.antenna_mask = 3;
+ dev->mphy.antenna_mask = 3;
if (mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4))
- dev->mt76.antenna_mask = 1;
+ dev->mphy.antenna_mask = 1;
dev->slottime = 9;
@@ -557,6 +557,7 @@ int mt7603_register_device(struct mt7603_dev *dev)
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
@@ -564,7 +565,6 @@ int mt7603_register_device(struct mt7603_dev *dev)
dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
}
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy->reg_notifier = mt7603_regd_notifier;
ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
@@ -573,7 +573,7 @@ int mt7603_register_device(struct mt7603_dev *dev)
return ret;
mt7603_init_debugfs(dev);
- mt7603_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt7603_init_txpower(dev, &dev->mphy.sband_2g.sband);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 812d081ad943..8f5ca9283f7d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -4,6 +4,7 @@
#include <linux/timekeeping.h>
#include "mt7603.h"
#include "mac.h"
+#include "../trace.h"
#define MT_PSE_PAGE_SIZE 128
@@ -53,7 +54,7 @@ void mt7603_mac_set_timing(struct mt7603_dev *dev)
int sifs;
u32 val;
- if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
sifs = 16;
else
sifs = 10;
@@ -456,7 +457,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev)
return;
spin_lock_bh(&dev->mt76.cc_lock);
- dev->mt76.chan_state->cc_tx += total_airtime;
+ dev->mphy.chan_state->cc_tx += total_airtime;
spin_unlock_bh(&dev->mt76.cc_lock);
}
@@ -502,7 +503,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
memset(status, 0, sizeof(*status));
i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
- sband = (i & 1) ? &dev->mt76.sband_5g.sband : &dev->mt76.sband_2g.sband;
+ sband = (i & 1) ? &dev->mphy.sband_5g.sband : &dev->mphy.sband_2g.sband;
i >>= 1;
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
@@ -531,12 +532,12 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
/* all subframes of an A-MPDU have the same timestamp */
if (dev->rx_ampdu_ts != rxd[12]) {
- if (!++dev->mt76.ampdu_ref)
- dev->mt76.ampdu_ref++;
+ if (!++dev->ampdu_ref)
+ dev->ampdu_ref++;
}
dev->rx_ampdu_ts = rxd[12];
- status->ampdu_ref = dev->mt76.ampdu_ref;
+ status->ampdu_ref = dev->ampdu_ref;
}
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
@@ -609,7 +610,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->rate_idx = i;
- status->chains = dev->mt76.antenna_mask;
+ status->chains = dev->mphy.antenna_mask;
status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
dev->rssi_offset[0];
status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
@@ -668,7 +669,7 @@ mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
*bw = 1;
} else {
const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
+ int band = dev->mphy.chandef.chan->band;
u16 val;
nss = 1;
@@ -1156,10 +1157,10 @@ out:
cck = true;
/* fall through */
case MT_PHY_TYPE_OFDM:
- if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &dev->mt76.sband_5g.sband;
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &dev->mphy.sband_5g.sband;
else
- sband = &dev->mt76.sband_2g.sband;
+ sband = &dev->mphy.sband_2g.sband;
final_rate &= GENMASK(5, 0);
final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
cck);
@@ -1193,6 +1194,8 @@ mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
if (pid < MT_PACKET_ID_FIRST)
return false;
+ trace_mac_txdone(mdev, sta->wcid.idx, pid);
+
mt76_tx_status_lock(mdev, &list);
skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
if (skb) {
@@ -1389,10 +1392,10 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
int i;
ieee80211_stop_queues(dev->mt76.hw);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
/* lock/unlock all queues to ensure that no tx is pending */
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
tasklet_disable(&dev->mt76.tx_tasklet);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
@@ -1426,7 +1429,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mt7603_pse_client_reset(dev);
- for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
+ for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
@@ -1439,7 +1442,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mt7603_irq_enable(dev, mask);
skip_dma_reset:
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->mt76.tx_tasklet);
@@ -1456,7 +1459,7 @@ skip_dma_reset:
napi_schedule(&dev->mt76.napi[1]);
ieee80211_wake_queues(dev->mt76.hw);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
@@ -1574,7 +1577,7 @@ void mt7603_update_channel(struct mt76_dev *mdev)
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt76_channel_state *state;
- state = mdev->chan_state;
+ state = mdev->phy.chan_state;
state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}
@@ -1737,7 +1740,7 @@ mt7603_false_cca_check(struct mt7603_dev *dev)
mt7603_cca_stats_reset(dev);
- min_signal = mt76_get_min_avg_rssi(&dev->mt76);
+ min_signal = mt76_get_min_avg_rssi(&dev->mt76, false);
if (!min_signal) {
dev->sensitivity = 0;
dev->last_cca_adj = jiffies;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 962e2822d19f..26cb711b465f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -15,8 +15,8 @@ mt7603_start(struct ieee80211_hw *hw)
mt7603_mac_reset_counters(dev);
mt7603_mac_start(dev);
- dev->mt76.survey_time = ktime_get_boottime();
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ dev->mphy.survey_time = ktime_get_boottime();
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt7603_mac_work(&dev->mt76.mac_work.work);
return 0;
@@ -27,7 +27,7 @@ mt7603_stop(struct ieee80211_hw *hw)
{
struct mt7603_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
cancel_delayed_work_sync(&dev->mt76.mac_work);
mt7603_mac_stop(dev);
}
@@ -143,16 +143,16 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
mt7603_beacon_set_timer(dev, -1, 0);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt7603_mac_stop(dev);
if (def->width == NL80211_CHAN_WIDTH_40)
bw = MT_BW_40;
- dev->mt76.chandef = *def;
+ dev->mphy.chandef = *def;
mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
ret = mt7603_mcu_set_channel(dev);
if (ret) {
@@ -176,9 +176,9 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
mt7603_mac_set_timing(dev);
mt7603_mac_start(dev);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
@@ -187,10 +187,10 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_READ_CLR_DIS);
mt76_set(dev, MT_MIB_CTL,
MT_MIB_CTL_CCA_NAV_TX | MT_MIB_CTL_PSCCA_TIME);
- mt76_rr(dev, MT_MIB_STAT_PSCCA);
+ mt76_rr(dev, MT_MIB_STAT_CCA);
mt7603_cca_stats_reset(dev);
- dev->mt76.survey_time = ktime_get_boottime();
+ dev->mphy.survey_time = ktime_get_boottime();
mt7603_init_edcca(dev);
@@ -642,7 +642,7 @@ mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
struct mt7603_dev *dev = hw->priv;
- dev->coverage_class = coverage_class;
+ dev->coverage_class = max_t(s16, coverage_class, 0);
mt7603_mac_set_timing(dev);
}
@@ -667,7 +667,7 @@ static void mt7603_tx(struct ieee80211_hw *hw,
wcid = &mvif->sta.wcid;
}
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
+ mt76_tx(&dev->mphy, control->sta, wcid, skb);
}
const struct ieee80211_ops mt7603_ops = {
@@ -680,6 +680,7 @@ const struct ieee80211_ops mt7603_ops = {
.configure_filter = mt7603_configure_filter,
.bss_info_changed = mt7603_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7603_set_key,
.conf_tx = mt7603_conf_tx,
.sw_scan_start = mt76_sw_scan,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 02b2bd60d04d..b466b3ab8a2c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -22,9 +22,9 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
struct mt7603_mcu_txd *txd;
u8 seq;
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
if (!seq)
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
txd = (struct mt7603_mcu_txd *)skb_push(skb, hdrlen);
memset(txd, 0, hdrlen);
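The double increment above keeps 0 out of the 4-bit MCU sequence space, presumably so a zero sequence number can never be mistaken for a valid command/response pairing. A hedged sketch of the helper in isolation:

#include <assert.h>

/* 4-bit message sequence counter that never yields 0. */
static unsigned char next_mcu_seq(unsigned char *ctr)
{
        unsigned char seq = ++(*ctr) & 0xf;

        if (!seq)
                seq = ++(*ctr) & 0xf;
        return seq;
}

int main(void)
{
        unsigned char ctr = 0;
        int i;

        for (i = 0; i < 64; i++)
                assert(next_mcu_seq(&ctr) != 0);
        return 0;
}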
@@ -67,7 +67,7 @@ mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
if (!skb)
return -ENOMEM;
- mutex_lock(&mdev->mmio.mcu.mutex);
+ mutex_lock(&mdev->mcu.mutex);
ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq);
if (ret)
@@ -97,7 +97,7 @@ mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
}
out:
- mutex_unlock(&mdev->mmio.mcu.mutex);
+ mutex_unlock(&mdev->mcu.mutex);
return ret;
}
@@ -277,7 +277,7 @@ int mt7603_mcu_init(struct mt7603_dev *dev)
void mt7603_mcu_exit(struct mt7603_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
}
int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
@@ -397,7 +397,7 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
u8 temp_comp_power[17];
u8 reserved;
} req = {
- .center_channel = dev->mt76.chandef.chan->hw_value,
+ .center_channel = dev->mphy.chandef.chan->hw_value,
#define EEP_VAL(n) ((u8 *)dev->mt76.eeprom.data)[n]
.tssi = EEP_VAL(MT_EE_NIC_CONF_1 + 1),
.temp_comp = EEP_VAL(MT_EE_NIC_CONF_1),
@@ -430,9 +430,9 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
int mt7603_mcu_set_channel(struct mt7603_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
struct ieee80211_hw *hw = mt76_hw(dev);
- int n_chains = hweight8(dev->mt76.antenna_mask);
+ int n_chains = hweight8(dev->mphy.antenna_mask);
struct {
u8 control_chan;
u8 center_chan;
@@ -452,7 +452,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
s8 tx_power;
int i, ret;
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) {
+ if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_40) {
req.bw = MT_BW_40;
if (chandef->center_freq1 > chandef->chan->center_freq)
req.center_chan += 2;
@@ -461,11 +461,11 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
}
tx_power = hw->conf.power_level * 2;
- if (dev->mt76.antenna_mask == 3)
+ if (dev->mphy.antenna_mask == 3)
tx_power -= 6;
tx_power = min(tx_power, dev->tx_power_limit);
- dev->mt76.txpower_cur = tx_power;
+ dev->mphy.txpower_cur = tx_power;
for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
req.txpower[i] = tx_power;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index ab54b0612e98..ef374641fe80 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -15,6 +15,7 @@
#define MT7603_RATE_RETRY 2
+#define MT7603_MCU_RX_RING_SIZE 64
#define MT7603_RX_RING_SIZE 128
#define MT7603_FIRMWARE_E1 "mt7603_e1.bin"
@@ -98,7 +99,10 @@ enum mt7603_reset_cause {
};
struct mt7603_dev {
- struct mt76_dev mt76; /* must be first */
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
const struct mt76_bus_ops *bus_ops;
@@ -115,6 +119,7 @@ struct mt7603_dev {
u32 false_cca_ofdm, false_cca_cck;
unsigned long last_cca_adj;
+ u32 ampdu_ref;
__le32 rx_ampdu_ts;
u8 rssi_offset[3];
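The union added to mt7603_dev above works because struct mt76_phy is the first member of struct mt76_dev, so dev->mphy and dev->mt76 start at the same address and the per-phy view can be handed to the new multi-phy helpers without pointer arithmetic. A minimal sketch of the pattern, using hypothetical stand-in types:

#include <assert.h>

struct phy { unsigned long state; };
struct core { struct phy phy; int other; };     /* phy must stay first */

struct driver_dev {
        union { /* must be first */
                struct core core;
                struct phy phy;
        };
};

int main(void)
{
        struct driver_dev d = { 0 };

        /* Both names alias the same storage. */
        assert((void *)&d.phy == (void *)&d.core.phy);
        return 0;
}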
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
index 6e23ed3dfdff..6741e6907194 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -585,18 +585,9 @@ enum {
#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8))
#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8))
-#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
-#define MT_LED_STATUS_OFF(_v) (((_v) << \
- __ffs(MT_LED_STATUS_OFF_MASK)) & \
- MT_LED_STATUS_OFF_MASK)
-#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
-#define MT_LED_STATUS_ON(_v) (((_v) << \
- __ffs(MT_LED_STATUS_ON_MASK)) & \
- MT_LED_STATUS_ON_MASK)
-#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 0)
-#define MT_LED_STATUS_DURATION(_v) (((_v) << \
- __ffs(MT_LED_STATUS_DURATION_MASK)) &\
- MT_LED_STATUS_DURATION_MASK)
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 0)
#define MT_CLIENT_BASE_PHYS_ADDR 0x800c0000
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
index 4cabba9aa2ea..6afd4aea67ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
@@ -11,3 +11,14 @@ config MT7615E
MU-MIMO up to 4 users/group and 160MHz channels.
To compile this driver as a module, choose M here.
+
+config MT7622_WMAC
+ bool "MT7622 (SoC) WMAC support"
+ depends on MT7615E
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ default y
+ help
+ This adds support for the built-in WMAC on MT7622 SoC devices,
+ which has the same feature set as an MT7615 but is limited to
+ 2.4 GHz only.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
index 5aaac69849d6..5c6a220ed7e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
@@ -2,5 +2,8 @@
obj-$(CONFIG_MT7615E) += mt7615e.o
-mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
- debugfs.o
+CFLAGS_trace.o := -I$(src)
+
+mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o mmio.o \
+ debugfs.o trace.o
+mt7615e-$(CONFIG_MT7622_WMAC) += soc.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index f6b75f832e6a..b4d0795154e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -7,6 +7,9 @@ mt7615_radar_pattern_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
return mt7615_mcu_rdd_send_pattern(dev);
}
@@ -18,6 +21,9 @@ mt7615_scs_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
mt7615_mac_set_scs(dev, val);
return 0;
@@ -37,6 +43,84 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get,
mt7615_scs_set, "%lld\n");
static int
+mt7615_dbdc_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ if (val)
+ mt7615_register_ext_phy(dev);
+ else
+ mt7615_unregister_ext_phy(dev);
+
+ return 0;
+}
+
+static int
+mt7615_dbdc_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = !!mt7615_ext_phy(dev);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_dbdc, mt7615_dbdc_get,
+ mt7615_dbdc_set, "%lld\n");
+
+static int
+mt7615_fw_debug_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ dev->fw_debug = val;
+ mt7615_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0);
+
+ return 0;
+}
+
+static int
+mt7615_fw_debug_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = dev->fw_debug;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7615_fw_debug_get,
+ mt7615_fw_debug_set, "%lld\n");
+
+static int
+mt7615_reset_test_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+ struct sk_buff *skb;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ skb = alloc_skb(1, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, 1);
+ mt76_tx_queue_skb_raw(dev, 0, skb, 0);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_reset_test, NULL,
+ mt7615_reset_test_set, "%lld\n");
+
+static int
mt7615_ampdu_stat_read(struct seq_file *file, void *data)
{
struct mt7615_dev *dev = file->private;
@@ -74,15 +158,28 @@ static const struct file_operations fops_ampdu_stat = {
.release = single_release,
};
+static void
+mt7615_radio_read_phy(struct mt7615_phy *phy, struct seq_file *s)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ bool ext_phy = phy != &dev->phy;
+
+ if (!phy)
+ return;
+
+ seq_printf(s, "Radio %d sensitivity: ofdm=%d cck=%d\n", ext_phy,
+ phy->ofdm_sensitivity, phy->cck_sensitivity);
+ seq_printf(s, "Radio %d false CCA: ofdm=%d cck=%d\n", ext_phy,
+ phy->false_cca_ofdm, phy->false_cca_cck);
+}
+
static int
mt7615_radio_read(struct seq_file *s, void *data)
{
struct mt7615_dev *dev = dev_get_drvdata(s->private);
- seq_printf(s, "Sensitivity: ofdm=%d cck=%d\n",
- dev->ofdm_sensitivity, dev->cck_sensitivity);
- seq_printf(s, "False CCA: ofdm=%d cck=%d\n",
- dev->false_cca_ofdm, dev->false_cca_cck);
+ mt7615_radio_read_phy(&dev->phy, s);
+ mt7615_radio_read_phy(mt7615_ext_phy(dev), s);
return 0;
}
@@ -92,6 +189,9 @@ static int mt7615_read_temperature(struct seq_file *s, void *data)
struct mt7615_dev *dev = dev_get_drvdata(s->private);
int temp;
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
/* cpu */
temp = mt7615_mcu_get_temperature(dev, 0);
seq_printf(s, "Temperature: %d\n", temp);
@@ -164,12 +264,18 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
if (!dir)
return -ENOMEM;
- debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
- mt7615_queues_read);
+ if (is_mt7615(&dev->mt76))
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt7615_queues_read);
+ else
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt76_queues_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7615_queues_acq);
debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_file("scs", 0600, dir, dev, &fops_scs);
+ debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc);
+ debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
mt7615_radio_read);
debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
@@ -184,6 +290,8 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
&dev->radar_pattern.power);
debugfs_create_file("radar_trigger", 0200, dir, dev,
&fops_radar_pattern);
+ debugfs_create_file("reset_test", 0200, dir, dev,
+ &fops_reset_test);
debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
mt7615_read_temperature);
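All of the new debugfs knobs in this file follow the same shape: a pair of u64 get/set callbacks wrapped into file operations by DEFINE_DEBUGFS_ATTRIBUTE() and registered with debugfs_create_file(). A hedged, self-contained sketch of that pattern with hypothetical names:

#include <linux/debugfs.h>
#include <linux/module.h>

static u64 demo_value;

static int demo_get(void *data, u64 *val)
{
        *val = demo_value;
        return 0;
}

static int demo_set(void *data, u64 val)
{
        demo_value = val;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_demo, demo_get, demo_set, "%lld\n");

static int __init demo_init(void)
{
        debugfs_create_file("demo", 0600, NULL, NULL, &fops_demo);
        return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");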
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 285d4f1d6178..1bc71f5081ce 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -12,47 +12,85 @@
#include "mac.h"
static int
-mt7615_init_tx_queues(struct mt7615_dev *dev, int n_desc)
+mt7615_init_tx_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
+ int idx, int n_desc)
{
- struct mt76_sw_queue *q;
struct mt76_queue *hwq;
- int err, i;
+ int err;
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
if (!hwq)
return -ENOMEM;
- err = mt76_queue_alloc(dev, hwq, 0, n_desc, 0, MT_TX_RING_BASE);
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
if (err < 0)
return err;
- for (i = 0; i < MT_TXQ_MCU; i++) {
- q = &dev->mt76.q_tx[i];
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
- }
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
return 0;
}
static int
-mt7615_init_mcu_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
{
- struct mt76_queue *hwq;
- int err;
+ static const u8 wmm_queue_map[] = {
+ MT7622_TXQ_AC0,
+ MT7622_TXQ_AC1,
+ MT7622_TXQ_AC2,
+ MT7622_TXQ_AC3,
+ };
+ int ret;
+ int i;
- hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
- if (!hwq)
- return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[i],
+ wmm_queue_map[i],
+ MT7615_TX_RING_SIZE / 2);
+ if (ret)
+ return ret;
+ }
- err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
- if (err < 0)
- return err;
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE);
+ if (ret)
+ return ret;
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT7622_TXQ_MCU, MT7615_TX_MCU_RING_SIZE);
+ return ret;
+}
+
+static int
+mt7615_init_tx_queues(struct mt7615_dev *dev)
+{
+ struct mt76_sw_queue *q;
+ int ret, i;
+
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
+ MT7615_TXQ_FWDL,
+ MT7615_TX_FWDL_RING_SIZE);
+ if (ret)
+ return ret;
+
+ if (!is_mt7615(&dev->mt76))
+ return mt7622_init_tx_queues_multi(dev);
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[0], 0,
+ MT7615_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < MT_TXQ_MCU; i++) {
+ q = &dev->mt76.q_tx[i];
+ INIT_LIST_HEAD(&q->swq);
+ q->q = dev->mt76.q_tx[0].q;
+ }
+
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT7615_TXQ_MCU,
+ MT7615_TX_MCU_RING_SIZE);
return 0;
}
@@ -90,25 +128,32 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
+static void
+mt7615_tx_cleanup(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
+ if (is_mt7615(&dev->mt76)) {
+ mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+ } else {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ mt76_queue_tx_cleanup(dev, i, false);
+ }
+}
+
static int mt7615_poll_tx(struct napi_struct *napi, int budget)
{
- static const u8 queue_map[] = {
- MT_TXQ_MCU,
- MT_TXQ_BE
- };
struct mt7615_dev *dev;
- int i;
dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
- for (i = 0; i < ARRAY_SIZE(queue_map); i++)
- mt76_queue_tx_cleanup(dev, queue_map[i], false);
+ mt7615_tx_cleanup(dev);
if (napi_complete_done(napi, 0))
mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
- for (i = 0; i < ARRAY_SIZE(queue_map); i++)
- mt76_queue_tx_cleanup(dev, queue_map[i], false);
+ mt7615_tx_cleanup(dev);
mt7615_mac_sta_poll(dev);
@@ -117,8 +162,33 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
+static void mt7622_dma_sched_init(struct mt7615_dev *dev)
+{
+ u32 reg = mt7615_reg_map(dev, MT_DMASHDL_BASE);
+ int i;
+
+ mt76_rmw(dev, reg + MT_DMASHDL_PKT_MAX_SIZE,
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+ for (i = 0; i <= 5; i++)
+ mt76_wr(dev, reg + MT_DMASHDL_GROUP_QUOTA(i),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x10) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x800));
+
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(0), 0x42104210);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(1), 0x42104210);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(2), 0x5);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(3), 0);
+
+ mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET0, 0x6012345f);
+ mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET1, 0xedcba987);
+}
+
int mt7615_dma_init(struct mt7615_dev *dev)
{
+ int rx_ring_size = MT7615_RX_RING_SIZE;
int ret;
mt76_dma_attach(&dev->mt76);
@@ -126,9 +196,12 @@ int mt7615_dma_init(struct mt7615_dev *dev)
mt76_wr(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE |
MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN |
- MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY |
MT_WPDMA_GLO_CFG_OMIT_TX_INFO);
+ if (!is_mt7622(&dev->mt76))
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY);
+
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0, 0x1);
@@ -141,28 +214,19 @@ int mt7615_dma_init(struct mt7615_dev *dev)
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_MULTI_DMA_EN, 0x3);
- mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
- mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
- mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
- mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
- mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
- mt76_set(dev, 0x7158, BIT(16));
- mt76_clear(dev, 0x7000, BIT(23));
- mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
-
- ret = mt7615_init_tx_queues(dev, MT7615_TX_RING_SIZE);
- if (ret)
- return ret;
+ if (is_mt7615(&dev->mt76)) {
+ mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
+ mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
+ mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
+ mt76_set(dev, 0x7158, BIT(16));
+ mt76_clear(dev, 0x7000, BIT(23));
+ }
- ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
- MT7615_TXQ_MCU,
- MT7615_TX_MCU_RING_SIZE);
- if (ret)
- return ret;
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
- ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
- MT7615_TXQ_FWDL,
- MT7615_TX_FWDL_RING_SIZE);
+ ret = mt7615_init_tx_queues(dev);
if (ret)
return ret;
@@ -173,9 +237,11 @@ int mt7615_dma_init(struct mt7615_dev *dev)
if (ret)
return ret;
+ if (!is_mt7615(&dev->mt76))
+ rx_ring_size /= 2;
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
- MT7615_RX_RING_SIZE, MT_RX_BUF_SIZE,
- MT_RX_RING_BASE);
+ rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE);
if (ret)
return ret;
@@ -199,7 +265,11 @@ int mt7615_dma_init(struct mt7615_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_EN);
/* enable interrupts for TX/RX rings */
- mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
+ mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_MCU_CMD);
+
+ if (is_mt7622(&dev->mt76))
+ mt7622_dma_sched_init(dev);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index 17e277bf39e0..5220c18e711f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -84,6 +84,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
switch (val) {
case 0x7615:
+ case 0x7622:
return 0;
default:
return -EINVAL;
@@ -93,7 +94,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
{
u8 *eeprom = dev->mt76.eeprom.data;
- u8 tx_mask, rx_mask, max_nss;
+ u8 tx_mask, max_nss;
u32 val;
val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
@@ -111,22 +112,21 @@ static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
break;
}
+ if (is_mt7622(&dev->mt76))
+ dev->mt76.cap.has_5ghz = false;
+
/* read tx-rx mask from eeprom */
val = mt76_rr(dev, MT_TOP_STRAP_STA);
max_nss = val & MT_TOP_3NSS ? 3 : 4;
- rx_mask = FIELD_GET(MT_EE_NIC_CONF_RX_MASK,
- eeprom[MT_EE_NIC_CONF_0]);
- if (!rx_mask || rx_mask > max_nss)
- rx_mask = max_nss;
-
tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK,
eeprom[MT_EE_NIC_CONF_0]);
if (!tx_mask || tx_mask > max_nss)
tx_mask = max_nss;
- dev->mt76.chainmask = tx_mask << 8 | rx_mask;
- dev->mt76.antenna_mask = BIT(tx_mask) - 1;
+ dev->chainmask = BIT(tx_mask) - 1;
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
}
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
@@ -209,6 +209,26 @@ static void mt7615_apply_cal_free_data(struct mt7615_dev *dev)
eeprom[ical_nocheck[i]] = otp[ical_nocheck[i]];
}
+static void mt7622_apply_cal_free_data(struct mt7615_dev *dev)
+{
+ static const u16 ical[] = {
+ 0x53, 0x54, 0x55, 0x56, 0xf4, 0xf7, 0x144, 0x156, 0x15b
+ };
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 *otp = dev->mt76.otp.data;
+ int i;
+
+ if (!otp)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ical); i++) {
+ if (!otp[ical[i]])
+ continue;
+
+ eeprom[ical[i]] = otp[ical[i]];
+ }
+}
+
int mt7615_eeprom_init(struct mt7615_dev *dev)
{
int ret;
@@ -221,6 +241,8 @@ int mt7615_eeprom_init(struct mt7615_dev *dev)
if (ret && dev->mt76.otp.data)
memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
MT7615_EEPROM_SIZE);
+ else if (is_mt7622(&dev->mt76))
+ mt7622_apply_cal_free_data(dev);
else
mt7615_apply_cal_free_data(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
index c3bc69ac210e..18c7301521b7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -21,7 +21,8 @@ enum mt7615_eeprom_field {
MT_EE_TX2_5G_G0_TARGET_POWER = 0x142,
MT_EE_TX3_5G_G0_TARGET_POWER = 0x16a,
- __MT_EE_MAX = 0x3bf
+ MT7615_EE_MAX = 0x3bf,
+ MT7622_EE_MAX = 0x3db,
};
#define MT_EE_NIC_CONF_TX_MASK GENMASK(7, 4)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 553bd4d988f7..889eb72ad6bd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -13,9 +13,9 @@
static void mt7615_phy_init(struct mt7615_dev *dev)
{
- /* disable band 0 rf low power beacon mode */
- mt76_rmw(dev, MT_WF_PHY_WF2_RFCTRL0, MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN,
- MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+ /* disable rf low power beacon mode */
+ mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(0), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+ mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(1), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
}
static void mt7615_mac_init(struct mt7615_dev *dev)
@@ -28,17 +28,17 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN |
MT_CFG_CCR_MAC_D1_1X_GC_EN | MT_CFG_CCR_MAC_D1_2X_GC_EN);
- val = mt76_rmw(dev, MT_TMAC_TRCR0,
+ val = mt76_rmw(dev, MT_TMAC_TRCR(0),
MT_TMAC_TRCR_CCA_SEL | MT_TMAC_TRCR_SEC_CCA_SEL,
FIELD_PREP(MT_TMAC_TRCR_CCA_SEL, 2) |
FIELD_PREP(MT_TMAC_TRCR_SEC_CCA_SEL, 0));
- mt76_wr(dev, MT_TMAC_TRCR1, val);
+ mt76_wr(dev, MT_TMAC_TRCR(1), val);
val = MT_AGG_ACR_PKT_TIME_EN | MT_AGG_ACR_NO_BA_AR_RULE |
- FIELD_PREP(MT_AGG_ACR_CFEND_RATE, 0x49) | /* 24M */
- FIELD_PREP(MT_AGG_ACR_BAR_RATE, 0x4b); /* 6M */
- mt76_wr(dev, MT_AGG_ACR0, val);
- mt76_wr(dev, MT_AGG_ACR1, val);
+ FIELD_PREP(MT_AGG_ACR_CFEND_RATE, MT7615_CFEND_RATE_DEFAULT) |
+ FIELD_PREP(MT_AGG_ACR_BAR_RATE, MT7615_BAR_RATE_DEFAULT);
+ mt76_wr(dev, MT_AGG_ACR(0), val);
+ mt76_wr(dev, MT_AGG_ACR(1), val);
mt76_rmw_field(dev, MT_TMAC_CTCR0,
MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
@@ -50,36 +50,36 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
MT_TMAC_CTCR0_INS_DDLMT_EN);
- mt7615_mcu_set_rts_thresh(dev, 0x92b);
+ mt7615_mcu_set_rts_thresh(&dev->phy, 0x92b);
mt7615_mac_set_scs(dev, true);
mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
- mt7615_mcu_init_mac(dev);
-
mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072));
- mt76_wr(dev, MT_AGG_ARUCR,
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), 2) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), 1) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), 1) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1));
-
- mt76_wr(dev, MT_AGG_ARDCR,
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7615_RATE_RETRY - 1) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7615_RATE_RETRY - 1) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7615_RATE_RETRY - 1) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7615_RATE_RETRY - 1) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7615_RATE_RETRY - 1) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
+ val = FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1);
+ mt76_wr(dev, MT_AGG_ARUCR(0), val);
+ mt76_wr(dev, MT_AGG_ARUCR(1), val);
+
+ val = FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1);
+ mt76_wr(dev, MT_AGG_ARDCR(0), val);
+ mt76_wr(dev, MT_AGG_ARDCR(1), val);
mt76_wr(dev, MT_AGG_ARCR,
(FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
@@ -95,8 +95,8 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_DMA_RCFR0_RX_DROPPED_MCAST;
set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) |
FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2);
- mt76_rmw(dev, MT_DMA_BN0RCFR0, mask, set);
- mt76_rmw(dev, MT_DMA_BN1RCFR0, mask, set);
+ mt76_rmw(dev, MT_DMA_RCFR0(0), mask, set);
+ mt76_rmw(dev, MT_DMA_RCFR0(1), mask, set);
for (i = 0; i < MT7615_WTBL_SIZE; i++)
mt7615_mac_wtbl_update(dev, i,
@@ -106,12 +106,33 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_EN);
}
+bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
+{
+ flush_work(&dev->mcu_work);
+
+ return test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+}
+
+static void mt7615_init_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev = container_of(work, struct mt7615_dev, mcu_work);
+
+ if (mt7615_mcu_init(dev))
+ return;
+
+ mt7615_mcu_set_eeprom(dev);
+ mt7615_mac_init(dev);
+ mt7615_phy_init(dev);
+ mt7615_mcu_del_wtbl_all(dev);
+}
+
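This moves the slow MCU bring-up off the probe path into a worker; any caller that needs the firmware first goes through mt7615_wait_for_mcu_init() above, which is just flush_work() plus a state-bit check (this is what gates the new debugfs handlers earlier in the patch). A condensed sketch of the pattern with hypothetical names:

#include <linux/bitops.h>
#include <linux/workqueue.h>

struct demo_dev {
        struct work_struct init_work;
        unsigned long state;            /* bit 0: firmware running */
};

static void demo_init_work(struct work_struct *work)
{
        struct demo_dev *dev = container_of(work, struct demo_dev,
                                            init_work);

        /* ... load firmware, program defaults ... */
        set_bit(0, &dev->state);
}

static bool demo_wait_for_init(struct demo_dev *dev)
{
        flush_work(&dev->init_work);
        return test_bit(0, &dev->state);
}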
static int mt7615_init_hardware(struct mt7615_dev *dev)
{
int ret, idx;
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+ INIT_WORK(&dev->mcu_work, mt7615_init_work);
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
@@ -123,17 +144,7 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
if (ret)
return ret;
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
-
- ret = mt7615_mcu_init(dev);
- if (ret)
- return ret;
-
- mt7615_mcu_set_eeprom(dev);
- mt7615_mac_init(dev);
- mt7615_phy_init(dev);
- mt7615_mcu_ctrl_pm_state(dev, 0);
- mt7615_mcu_del_wtbl_all(dev);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
@@ -200,12 +211,65 @@ static const struct ieee80211_iface_combination if_comb[] = {
};
static void
+mt7615_led_set_config(struct led_classdev *led_cdev,
+ u8 delay_on, u8 delay_off)
+{
+ struct mt7615_dev *dev;
+ struct mt76_dev *mt76;
+ u32 val, addr;
+
+ mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
+ dev = container_of(mt76, struct mt7615_dev, mt76);
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+
+ val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+ MT_LED_CTRL_KICK(mt76->led_pin);
+ if (mt76->led_al)
+ val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+ addr = mt7615_reg_map(dev, MT_LED_CTRL);
+ mt76_wr(dev, addr, val);
+}
+
+static int
+mt7615_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt7615_led_set_config(led_cdev, delta_on, delta_off);
+
+ return 0;
+}
+
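The blink callback above quantizes the requested on/off periods into the 8-bit hardware fields by dividing the millisecond values by 10, so one register unit presumably covers 10 ms: *delay_on = 500 ms becomes 50 units, while a request of 3 ms is floored to 1 unit by the max_t() rather than truncating to zero, which would stop the blink entirely.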
+static void
+mt7615_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ if (!brightness)
+ mt7615_led_set_config(led_cdev, 0, 0xff);
+ else
+ mt7615_led_set_config(led_cdev, 0xff, 0);
+}
+
+static void
mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband)
{
- int i, n_chains = hweight8(dev->mt76.antenna_mask), target_chains;
+ int i, n_chains = hweight8(dev->mphy.antenna_mask), target_chains;
u8 *eep = (u8 *)dev->mt76.eeprom.data;
enum nl80211_band band = sband->band;
+ int delta = mt76_tx_power_nss_delta(n_chains);
target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
for (i = 0; i < sband->n_channels; i++) {
@@ -220,21 +284,7 @@ mt7615_init_txpower(struct mt7615_dev *dev,
target_power = max(target_power, eep[index]);
}
- target_power = DIV_ROUND_UP(target_power, 2);
- switch (n_chains) {
- case 4:
- target_power += 6;
- break;
- case 3:
- target_power += 4;
- break;
- case 2:
- target_power += 3;
- break;
- default:
- break;
- }
-
+ target_power = DIV_ROUND_UP(target_power + delta, 2);
chan->max_power = min_t(int, chan->max_reg_power,
target_power);
chan->orig_mpwr = target_power;
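mt76_tx_power_nss_delta() folds the old switch into a table of half-dB deltas (6/9/12 for 2/3/4 chains, assuming the common helper's usual { 0, 6, 9, 12 } table) that is applied before the final halving of the EEPROM's half-dB units: a 2-chain radio still gains 3 dB, while a 3-chain one now gains 4.5 dB instead of the old 4.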
@@ -246,74 +296,181 @@ mt7615_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct mt7615_dev *dev = hw->priv;
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
-
- if (request->dfs_region == dev->mt76.region)
- return;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct mt7615_phy *phy = mphy->priv;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
dev->mt76.region = request->dfs_region;
if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
return;
- mt7615_dfs_stop_radar_detector(dev);
- if (request->dfs_region == NL80211_DFS_UNSET)
- mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0,
- MT_RX_SEL0, 0);
- else
- mt7615_dfs_start_radar_detector(dev);
+ mt7615_dfs_init_radar_detector(phy);
}
-int mt7615_register_device(struct mt7615_dev *dev)
+static void
+mt7615_init_wiphy(struct ieee80211_hw *hw)
{
- struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct wiphy *wiphy = hw->wiphy;
- int ret;
-
- INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
- INIT_LIST_HEAD(&dev->sta_poll_list);
- spin_lock_init(&dev->sta_poll_lock);
-
- ret = mt7615_init_hardware(dev);
- if (ret)
- return ret;
hw->queues = 4;
hw->max_rates = 3;
hw->max_report_rates = 7;
hw->max_rate_tries = 11;
+ phy->slottime = 9;
+
hw->sta_data_size = sizeof(struct mt7615_sta);
hw->vif_data_size = sizeof(struct mt7615_vif);
wiphy->iface_combinations = if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->reg_notifier = mt7615_regd_notifier;
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
- dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.vht_cap.cap |=
+ if (is_mt7615(&phy->dev->mt76))
+ hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
+ else
+ hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
+}
+
+static void
+mt7615_cap_dbdc_enable(struct mt7615_dev *dev)
+{
+ dev->mphy.sband_5g.sband.vht_cap.cap &=
+ ~(IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ);
+ if (dev->chainmask == 0xf)
+ dev->mphy.antenna_mask = dev->chainmask >> 2;
+ else
+ dev->mphy.antenna_mask = dev->chainmask >> 1;
+ dev->phy.chainmask = dev->mphy.antenna_mask;
+ mt76_set_stream_caps(&dev->mt76, true);
+}
+
+static void
+mt7615_cap_dbdc_disable(struct mt7615_dev *dev)
+{
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
- dev->dfs_state = -1;
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
+ mt76_set_stream_caps(&dev->mt76, true);
+}
+
+int mt7615_register_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy = mt7615_ext_phy(dev);
+ struct mt76_phy *mphy;
+ int ret;
+
+ if (!is_mt7615(&dev->mt76))
+ return -EOPNOTSUPP;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return -EINVAL;
+
+ if (phy)
+ return 0;
+
+ mt7615_cap_dbdc_enable(dev);
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7615_ops);
+ if (!mphy)
+ return -ENOMEM;
+
+ phy = mphy->priv;
+ phy->dev = dev;
+ phy->mt76 = mphy;
+ phy->chainmask = dev->chainmask & ~dev->phy.chainmask;
+ mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
+ mt7615_init_wiphy(mphy->hw);
+
+ /*
+ * Make the secondary PHY MAC address local, without overlapping
+ * the MAC address range that mac80211 derives for additional
+ * virtual interfaces on the primary PHY.
+ */
+ mphy->hw->wiphy->perm_addr[0] |= 2;
+ mphy->hw->wiphy->perm_addr[0] ^= BIT(7);
+
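A worked example of the two address tweaks above, starting from a hypothetical perm_addr of 00:11:22:33:44:55: the |= 2 sets the locally administered bit (first octet 0x00 -> 0x02) and the ^= BIT(7) flips the top bit (0x02 -> 0x82), so the second PHY comes up as 82:11:22:33:44:55, a local address kept clear of what mac80211 derives for extra interfaces on the primary PHY.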
+ /* second phy can only handle 5 GHz */
+ mphy->sband_2g.sband.n_channels = 0;
+ mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+
+ /* The second interface does not get any packets unless it has a vif */
+ ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF);
+
+ ret = mt76_register_phy(mphy);
+ if (ret)
+ ieee80211_free_hw(mphy->hw);
+
+ return ret;
+}
+
+void mt7615_unregister_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy = mt7615_ext_phy(dev);
+ struct mt76_phy *mphy = dev->mt76.phy2;
+
+ if (!phy)
+ return;
+
+ mt7615_cap_dbdc_disable(dev);
+ mt76_unregister_phy(mphy);
+ ieee80211_free_hw(mphy->hw);
+}
+
+int mt7615_register_device(struct mt7615_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ int ret;
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
+ init_waitqueue_head(&dev->reset_wait);
+ INIT_WORK(&dev->reset_work, mt7615_mac_reset_work);
+
+ ret = mt7622_wmac_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7615_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ mt7615_init_wiphy(hw);
+ dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ mt7615_cap_dbdc_disable(dev);
+ dev->phy.dfs_state = -1;
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt7615_led_set_blink;
+ }
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
if (ret)
return ret;
- mt7615_init_txpower(dev, &dev->mt76.sband_2g.sband);
- mt7615_init_txpower(dev, &dev->mt76.sband_5g.sband);
-
- hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
+ ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work);
+ mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
return mt7615_init_debugfs(dev);
}
@@ -321,10 +478,15 @@ int mt7615_register_device(struct mt7615_dev *dev)
void mt7615_unregister_device(struct mt7615_dev *dev)
{
struct mt76_txwi_cache *txwi;
+ bool mcu_running;
int id;
+ mcu_running = mt7615_wait_for_mcu_init(dev);
+
+ mt7615_unregister_ext_phy(dev);
mt76_unregister_device(&dev->mt76);
- mt7615_mcu_exit(dev);
+ if (mcu_running)
+ mt7615_mcu_exit(dev);
mt7615_dma_cleanup(dev);
spin_lock_bh(&dev->token_lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index c77adc5d2552..145366dbc39b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -10,13 +10,50 @@
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
+#include "../trace.h"
#include "../dma.h"
+#include "mt7615_trace.h"
#include "mac.h"
-static inline s8 to_rssi(u32 field, u32 rxv)
-{
- return (FIELD_GET(field, rxv) - 220) / 2;
-}
+#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
+
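The raw per-chain value is RCPI-like, 2 counts per dB with an offset, so to_rssi() decodes raw 190 as (190 - 220) / 2 = -15 dBm and bottoms out at -110 dBm for raw 0. Converting the old inline helper to a macro presumably keeps FIELD_GET()'s compile-time constant-mask check reliable regardless of inlining decisions, since the mask now reaches it as a literal rather than a function argument.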
+static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [5] = { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 },
+ [6] = { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 },
+ [7] = { 1, 0, 15, 32, 28, 0, 27, 240, 445, 1, 1 },
+ [8] = { 1, 0, 12, 32, 28, 0, 42, 240, 510, 1, 1 },
+ [9] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
+ [10] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
+ [11] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 18, 32, 28 },
+ [12] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 27, 32, 24 },
+ },
+};
+
+static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 13, 323, 343, 1, 32 },
+ },
+};
+
+static const struct mt7615_dfs_radar_spec jp_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
+ [4] = { 1, 0, 9, 32, 28, 0, 13, 323, 343, 1, 32 },
+ [13] = { 1, 0, 8, 32, 28, 0, 14, 3836, 3856, 1, 1 },
+ [14] = { 1, 0, 8, 32, 28, 0, 14, 3990, 4010, 1, 1 },
+ },
+};
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
u8 idx, bool unicast)
@@ -49,34 +86,116 @@ void mt7615_mac_reset_counters(struct mt7615_dev *dev)
mt76_rr(dev, MT_TX_AGG_CNT(i));
memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
-
- /* TODO: add DBDC support */
+ dev->mt76.phy.survey_time = ktime_get_boottime();
+ if (dev->mt76.phy2)
+ dev->mt76.phy2->survey_time = ktime_get_boottime();
/* reset airtime counters */
mt76_rr(dev, MT_MIB_SDR9(0));
+ mt76_rr(dev, MT_MIB_SDR9(1));
+
mt76_rr(dev, MT_MIB_SDR36(0));
+ mt76_rr(dev, MT_MIB_SDR36(1));
+
mt76_rr(dev, MT_MIB_SDR37(0));
+ mt76_rr(dev, MT_MIB_SDR37(1));
+
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
+void mt7615_mac_set_timing(struct mt7615_phy *phy)
+{
+ s16 coverage_class = phy->coverage_class;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 val, reg_offset;
+ u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
+ int sifs, offset;
+
+ if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ)
+ sifs = 16;
+ else
+ sifs = 10;
+
+ if (ext_phy) {
+ coverage_class = max_t(s16, dev->phy.coverage_class,
+ coverage_class);
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+ } else {
+ struct mt7615_phy *phy_ext = mt7615_ext_phy(dev);
+
+ if (phy_ext)
+ coverage_class = max_t(s16, phy_ext->coverage_class,
+ coverage_class);
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
+ }
+ udelay(1);
+
+ offset = 3 * coverage_class;
+ reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+ mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset);
+
+ mt76_wr(dev, MT_TMAC_ICR(ext_phy),
+ FIELD_PREP(MT_IFS_EIFS, 360) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+ FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+
+ if (phy->slottime < 20)
+ val = MT7615_CFEND_RATE_DEFAULT;
+ else
+ val = MT7615_CFEND_RATE_11B;
+
+ mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
+ if (ext_phy)
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+ else
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
+}
+
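For the timing math in mt7615_mac_set_timing() above: offset = 3 * coverage_class stretches the CCA and PLCP timeout fields, presumably in microseconds, by 3 us per coverage-class step. With coverage_class = 10, reg_offset adds 30 to both fields, so the OFDM CCA timeout grows from 24 to 54 and the CCK PLCP timeout from 231 to 261. Both PHYs share one MAC timing block, which is why each side takes max_t() of the two coverage classes before programming it.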
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7615_phy *phy = &dev->phy;
+ struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
__le32 *rxd = (__le32 *)skb->data;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
+ __le32 rxd12 = rxd[12];
bool unicast, remove_pad, insert_ccmp_hdr = false;
+ int phy_idx;
int i, idx;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return -EINVAL;
+ u8 chfreq;
memset(status, 0, sizeof(*status));
+ chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
+ if (!phy2)
+ phy_idx = 0;
+ else if (phy2->chfreq == phy->chfreq)
+ phy_idx = -1;
+ else if (phy->chfreq == chfreq)
+ phy_idx = 0;
+ else if (phy2->chfreq == chfreq)
+ phy_idx = 1;
+ else
+ phy_idx = -1;
+
unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
@@ -91,14 +210,6 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
spin_unlock_bh(&dev->sta_poll_lock);
}
- /* TODO: properly support DBDC */
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
- if (status->band == NL80211_BAND_5GHZ)
- sband = &dev->mt76.sband_5g.sband;
- else
- sband = &dev->mt76.sband_2g.sband;
-
if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -112,28 +223,11 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
}
- if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
- MT_RXD2_NORMAL_NON_AMPDU))) {
- status->flag |= RX_FLAG_AMPDU_DETAILS;
-
- /* all subframes of an A-MPDU have the same timestamp */
- if (dev->rx_ampdu_ts != rxd[12]) {
- if (!++dev->mt76.ampdu_ref)
- dev->mt76.ampdu_ref++;
- }
- dev->rx_ampdu_ts = rxd[12];
-
- status->ampdu_ref = dev->mt76.ampdu_ref;
- }
-
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
return -EINVAL;
- if (!sband->channels)
- return -EINVAL;
-
rxd += 4;
if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
rxd += 4;
@@ -166,6 +260,59 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
}
if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
+ u32 rxdg5 = le32_to_cpu(rxd[5]);
+
+ /*
+ * If both PHYs are on the same channel and we don't have a WCID,
+ * we need to figure out which PHY this packet was received on.
+ * On the primary PHY, the noise value for the chains belonging to the
+ * second PHY will be set to the noise value of the last packet from
+ * that PHY.
+ */
+ if (phy_idx < 0) {
+ int first_chain = ffs(phy2->chainmask) - 1;
+
+ phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
+ }
+ }
+
+ if (phy_idx == 1 && phy2) {
+ mphy = dev->mt76.phy2;
+ phy = phy2;
+ status->ext_phy = true;
+ }
+
+ if (chfreq != phy->chfreq)
+ return -EINVAL;
+
+ status->freq = mphy->chandef.chan->center_freq;
+ status->band = mphy->chandef.chan->band;
+ if (status->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return -EINVAL;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
+ MT_RXD2_NORMAL_NON_AMPDU))) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != rxd12) {
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = rxd12;
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
u32 rxdg0 = le32_to_cpu(rxd[0]);
u32 rxdg1 = le32_to_cpu(rxd[1]);
u32 rxdg3 = le32_to_cpu(rxd[3]);
@@ -218,14 +365,14 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- status->chains = dev->mt76.antenna_mask;
+ status->chains = mphy->antenna_mask;
status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
status->signal = status->chain_signal[0];
- for (i = 1; i < hweight8(dev->mt76.antenna_mask); i++) {
+ for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
if (!(status->chains & BIT(i)))
continue;
@@ -274,13 +421,20 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
if (e->skb == DMA_DUMMY_DATA) {
struct mt76_txwi_cache *t;
struct mt7615_dev *dev;
- struct mt7615_txp *txp;
+ struct mt7615_txp_common *txp;
+ u16 token;
dev = container_of(mdev, struct mt7615_dev, mt76);
txp = mt7615_txwi_to_txp(mdev, e->txwi);
+ if (is_mt7615(&dev->mt76))
+ token = le16_to_cpu(txp->fw.token);
+ else
+ token = le16_to_cpu(txp->hw.msdu_id[0]) &
+ ~MT_MSDU_ID_VALID;
+
spin_lock_bh(&dev->token_lock);
- t = idr_remove(&dev->token, le16_to_cpu(txp->token));
+ t = idr_remove(&dev->token, token);
spin_unlock_bh(&dev->token_lock);
e->skb = t ? t->skb : NULL;
}
@@ -291,6 +445,7 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
static u16
mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
+ struct mt76_phy *mphy,
const struct ieee80211_tx_rate *rate,
bool stbc, u8 *bw)
{
@@ -319,11 +474,11 @@ mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
*bw = 1;
} else {
const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
+ int band = mphy->chandef.chan->band;
u16 val;
nss = 1;
- r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx];
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
val = r->hw_value_short;
else
@@ -355,6 +510,8 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool multicast = is_multicast_ether_addr(hdr->addr1);
struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_phy *mphy = &dev->mphy;
+ bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
int tx_count = 8;
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
__le16 fc = hdr->frame_control;
@@ -374,6 +531,9 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
tx_count = msta->rate_count;
}
+ if (ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
@@ -382,10 +542,16 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
skb_get_queue_mapping(skb);
p_fmt = MT_TX_TYPE_CT;
} else if (ieee80211_is_beacon(fc)) {
- q_idx = MT_LMAC_BCN0;
+ if (ext_phy)
+ q_idx = MT_LMAC_BCN1;
+ else
+ q_idx = MT_LMAC_BCN0;
p_fmt = MT_TX_TYPE_FW;
} else {
- q_idx = MT_LMAC_ALTX0;
+ if (ext_phy)
+ q_idx = MT_LMAC_ALTX1;
+ else
+ q_idx = MT_LMAC_ALTX0;
p_fmt = MT_TX_TYPE_CT;
}
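
The beacon/ALTX selection above follows from the tx_pkt_queue_idx enum extended later in this patch: band 1 gets its own ALTX/BMC/BCN/PSMP block directly after band 0's, so each band-1 queue sits four entries above its band-0 peer. A sketch under that assumption (enum values illustrative):

    enum lmac_q { ALTX0, BMC0, BCN0, PSMP0, ALTX1, BMC1, BCN1, PSMP1 };

    /* Band-1 queues sit one four-entry block above their band-0 peers. */
    static enum lmac_q lmac_queue(enum lmac_q band0_q, int ext_phy)
    {
        return band0_q + (ext_phy ? 4 : 0);
    }
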
@@ -431,7 +597,8 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
u8 bw;
- u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);
+ u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc,
+ &bw);
txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
@@ -486,18 +653,56 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
return 0;
}
-void mt7615_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *t)
+static void
+mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
{
- struct mt7615_txp *txp;
int i;
- txp = mt7615_txwi_to_txp(dev, t);
for (i = 1; i < txp->nbuf; i++)
dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}
+static void
+mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
+ struct mt7615_txp_ptr *ptr = &txp->ptr[i];
+ bool last;
+ u16 len;
+
+ len = le16_to_cpu(ptr->len0);
+ last = len & MT_TXD_LEN_MSDU_LAST;
+ len &= ~MT_TXD_LEN_MSDU_LAST;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+
+ len = le16_to_cpu(ptr->len1);
+ last = len & MT_TXD_LEN_MSDU_LAST;
+ len &= ~MT_TXD_LEN_MSDU_LAST;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+ }
+}
+
+void mt7615_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *t)
+{
+ struct mt7615_txp_common *txp;
+
+ txp = mt7615_txwi_to_txp(dev, t);
+ if (is_mt7615(dev))
+ mt7615_txp_skb_unmap_fw(dev, &txp->fw);
+ else
+ mt7615_txp_skb_unmap_hw(dev, &txp->hw);
+}
+
static u32 mt7615_mac_wtbl_addr(int wcid)
{
return MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE;
@@ -588,10 +793,12 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
rcu_read_unlock();
}
-void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates)
{
+ struct mt7615_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
struct ieee80211_tx_rate *ref;
int wcid = sta->wcid.idx;
u32 addr = mt7615_mac_wtbl_addr(wcid);
@@ -649,11 +856,12 @@ void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
}
}
- val[0] = mt7615_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
+ val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
bw_prev = bw;
if (probe_rate) {
- probe_val = mt7615_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
+ probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
+ stbc, &bw);
if (bw)
bw_idx = 1;
else
@@ -662,19 +870,19 @@ void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
probe_val = val[0];
}
- val[1] = mt7615_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
+ val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
if (bw_prev) {
bw_idx = 3;
bw_prev = bw;
}
- val[2] = mt7615_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
+ val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
if (bw_prev) {
bw_idx = 5;
bw_prev = bw;
}
- val[3] = mt7615_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
+ val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
if (bw_prev)
bw_idx = 7;
@@ -884,39 +1092,51 @@ out:
return err;
}
-int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info)
+static void
+mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct ieee80211_vif *vif = info->control.vif;
- int i, pid, id, nbuf = tx_info->nbuf - 1;
- u8 *txwi = (u8 *)txwi_ptr;
- struct mt76_txwi_cache *t;
- struct mt7615_txp *txp;
+ struct mt7615_hw_txp *txp = txp_ptr;
+ struct mt7615_txp_ptr *ptr = &txp->ptr[0];
+ int nbuf = tx_info->nbuf - 1;
+ int i;
- if (!wcid)
- wcid = &dev->mt76.global_wcid;
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
+ tx_info->nbuf = 1;
- pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+ txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
- spin_lock_bh(&dev->mt76.lock);
- mt7615_mac_set_rates(dev, msta, &info->control.rates[0],
- msta->rates);
- msta->rate_probe = true;
- spin_unlock_bh(&dev->mt76.lock);
+ for (i = 0; i < nbuf; i++) {
+ u32 addr = tx_info->buf[i + 1].addr;
+ u16 len = tx_info->buf[i + 1].len;
+
+ if (i == nbuf - 1)
+ len |= MT_TXD_LEN_MSDU_LAST |
+ MT_TXD_LEN_AMSDU_LAST;
+
+ if (i & 1) {
+ ptr->buf1 = cpu_to_le32(addr);
+ ptr->len1 = cpu_to_le16(len);
+ ptr++;
+ } else {
+ ptr->buf0 = cpu_to_le32(addr);
+ ptr->len0 = cpu_to_le16(len);
+ }
}
+}
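
mt7615_write_hw_txp() above packs two DMA fragments per descriptor slot. A self-contained sketch of that pairing; the struct mimics mt7615_txp_ptr and the flag bit matches the mac.h hunk below, everything else is illustrative (the driver additionally sets an AMSDU_LAST bit, omitted here):

    #include <stdint.h>

    struct txp_ptr { uint32_t buf0; uint16_t len0, len1; uint32_t buf1; };
    #define LEN_MSDU_LAST (1u << 14)    /* mirrors MT_TXD_LEN_MSDU_LAST */

    /* Even fragments fill slot 0; odd fragments fill slot 1 and advance
     * the descriptor. The final fragment carries the MSDU_LAST flag,
     * which is what lets the unmap path stop without knowing nbuf. */
    static void pack_frags(struct txp_ptr *ptr, const uint32_t *addr,
                           const uint16_t *len, int nbuf)
    {
        for (int i = 0; i < nbuf; i++) {
            uint16_t l = len[i];

            if (i == nbuf - 1)
                l |= LEN_MSDU_LAST;
            if (i & 1) {
                ptr->buf1 = addr[i];
                ptr->len1 = l;
                ptr++;
            } else {
                ptr->buf0 = addr[i];
                ptr->len0 = l;
            }
        }
    }
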
- mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
- pid, key);
+static void
+mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt7615_fw_txp *txp = txp_ptr;
+ int nbuf = tx_info->nbuf - 1;
+ int i;
- txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
@@ -924,6 +1144,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
txp->nbuf = nbuf;
/* pass partial skb header to fw */
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
tx_info->buf[1].len = MT_CT_PARSE_LEN;
tx_info->nbuf = MT_CT_DMA_BUF_NUM;
@@ -941,6 +1162,42 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
txp->bss_idx = mvif->idx;
}
+ txp->token = cpu_to_le16(id);
+ txp->rept_wds_wcid = 0xff;
+}
+
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ int pid, id;
+ u8 *txwi = (u8 *)txwi_ptr;
+ struct mt76_txwi_cache *t;
+ void *txp;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ struct mt7615_phy *phy = &dev->phy;
+
+ if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
+ phy = mdev->phy2->priv;
+
+ spin_lock_bh(&dev->mt76.lock);
+ mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
+ msta->rates);
+ msta->rate_probe = true;
+ spin_unlock_bh(&dev->mt76.lock);
+ }
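
One detail worth isolating from the probe block above: the frame's owning phy is resolved from a queue flag before the rate tables are touched. A trivial sketch, assuming only that the ext-phy bit travels in the frame's hw_queue flags:

    #include <stdbool.h>

    struct phy;

    /* Fall back to the primary phy when no second phy is registered. */
    static struct phy *tx_phy(struct phy *primary, struct phy *secondary,
                              bool ext_phy_flag)
    {
        return (ext_phy_flag && secondary) ? secondary : primary;
    }
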
+
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
@@ -950,8 +1207,16 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (id < 0)
return id;
- txp->token = cpu_to_le16(id);
- txp->rept_wds_wcid = 0xff;
+ mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
+ pid, key);
+
+ txp = txwi + MT_TXD_SIZE;
+ memset(txp, 0, sizeof(struct mt7615_txp_common));
+ if (is_mt7615(&dev->mt76))
+ mt7615_write_fw_txp(dev, tx_info, txp, id);
+ else
+ mt7615_write_hw_txp(dev, tx_info, txp, id);
+
tx_info->skb = DMA_DUMMY_DATA;
return 0;
@@ -962,6 +1227,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
{
struct ieee80211_supported_band *sband;
struct mt7615_rate_set *rs;
+ struct mt76_phy *mphy;
int first_idx = 0, last_idx;
int i, idx, count;
bool fixed_rate, ack_timeout;
@@ -1019,7 +1285,12 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
spin_lock_bh(&dev->mt76.lock);
if (sta->rate_probe) {
- mt7615_mac_set_rates(dev, sta, NULL, sta->rates);
+ struct mt7615_phy *phy = &dev->phy;
+
+ if (sta->wcid.ext_phy && dev->mt76.phy2)
+ phy = dev->mt76.phy2->priv;
+
+ mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
sta->rate_probe = false;
}
spin_unlock_bh(&dev->mt76.lock);
@@ -1059,10 +1330,14 @@ out:
cck = true;
/* fall through */
case MT_PHY_TYPE_OFDM:
- if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &dev->mt76.sband_5g.sband;
+ mphy = &dev->mphy;
+ if (sta->wcid.ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
else
- sband = &dev->mt76.sband_2g.sband;
+ sband = &mphy->sband_2g.sband;
final_rate &= MT_TX_RATE_IDX;
final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
cck);
@@ -1105,6 +1380,8 @@ static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
if (pid < MT_PACKET_ID_FIRST)
return false;
+ trace_mac_txdone(mdev, sta->wcid.idx, pid);
+
mt76_tx_status_lock(mdev, &list);
skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
if (skb) {
@@ -1128,6 +1405,7 @@ void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
struct ieee80211_sta *sta = NULL;
struct mt7615_sta *msta = NULL;
struct mt76_wcid *wcid;
+ struct mt76_phy *mphy = &dev->mt76.phy;
__le32 *txs_data = data;
u32 txs;
u8 wcidx;
@@ -1164,111 +1442,154 @@ void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
if (wcidx >= MT7615_WTBL_STA || !sta)
goto out;
+ if (wcid->ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
if (mt7615_fill_txs(dev, msta, &info, txs_data))
- ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+ ieee80211_tx_status_noskb(mphy->hw, sta, &info);
out:
rcu_read_unlock();
}
-void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+static void
+mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
{
- struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
+
+ trace_mac_tx_free(dev, token);
+
+ spin_lock_bh(&dev->token_lock);
+ txwi = idr_remove(&dev->token, token);
+ spin_unlock_bh(&dev->token_lock);
+
+ if (!txwi)
+ return;
+
+ mt7615_txp_skb_unmap(mdev, txwi);
+ if (txwi->skb) {
+ mt76_tx_complete_skb(mdev, txwi->skb);
+ txwi->skb = NULL;
+ }
+
+ mt76_put_txwi(mdev, txwi);
+}
+
+void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
u8 i, count;
count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
- for (i = 0; i < count; i++) {
- spin_lock_bh(&dev->token_lock);
- txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
- spin_unlock_bh(&dev->token_lock);
-
- if (!txwi)
- continue;
+ if (is_mt7615(&dev->mt76)) {
+ __le16 *token = &free->token[0];
- mt7615_txp_skb_unmap(mdev, txwi);
- if (txwi->skb) {
- mt76_tx_complete_skb(mdev, txwi->skb);
- txwi->skb = NULL;
- }
+ for (i = 0; i < count; i++)
+ mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
+ } else {
+ __le32 *token = (__le32 *)&free->token[0];
- mt76_put_txwi(mdev, txwi);
+ for (i = 0; i < count; i++)
+ mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
}
+
dev_kfree_skb(skb);
}
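
The tx-free parser above keeps a single free routine and varies only the element width: MT7615 proper reports 16-bit tokens, the other chips this driver covers report 32-bit ones. A sketch of the stride handling (byte-order conversion omitted; the driver uses le16_to_cpu/le32_to_cpu):

    #include <stdint.h>

    static void parse_tokens(const void *list, int count, int wide,
                             void (*free_token)(uint32_t token))
    {
        for (int i = 0; i < count; i++) {
            uint32_t token = wide ? ((const uint32_t *)list)[i]
                                  : ((const uint16_t *)list)[i];
            free_token(token);
        }
    }
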
static void
-mt7615_mac_set_default_sensitivity(struct mt7615_dev *dev)
+mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
{
- mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_OFDM_MASK,
- MT_WF_PHY_B0_PD_OFDM(0x13c));
- mt76_rmw(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
- MT_WF_PHY_B1_PD_OFDM_MASK,
- MT_WF_PHY_B1_PD_OFDM(0x13c));
-
- mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
- MT_WF_PHY_B0_PD_CCK_MASK,
- MT_WF_PHY_B0_PD_CCK(0x92));
- mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
- MT_WF_PHY_B1_PD_CCK_MASK,
- MT_WF_PHY_B1_PD_CCK(0x92));
-
- dev->ofdm_sensitivity = -98;
- dev->cck_sensitivity = -110;
- dev->last_cca_adj = jiffies;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+
+ mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
+ MT_WF_PHY_PD_OFDM_MASK(ext_phy),
+ MT_WF_PHY_PD_OFDM(ext_phy, 0x13c));
+ mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
+ MT_WF_PHY_PD_CCK_MASK(ext_phy),
+ MT_WF_PHY_PD_CCK(ext_phy, 0x92));
+
+ phy->ofdm_sensitivity = -98;
+ phy->cck_sensitivity = -110;
+ phy->last_cca_adj = jiffies;
}
void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable)
{
+ struct mt7615_phy *ext_phy;
+
mutex_lock(&dev->mt76.mutex);
if (dev->scs_en == enable)
goto out;
if (enable) {
- /* DBDC not supported */
- mt76_set(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_BLK);
+ mt76_set(dev, MT_WF_PHY_MIN_PRI_PWR(0),
+ MT_WF_PHY_PD_BLK(0));
+ mt76_set(dev, MT_WF_PHY_MIN_PRI_PWR(1),
+ MT_WF_PHY_PD_BLK(1));
if (is_mt7622(&dev->mt76)) {
mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7 << 8);
mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7);
}
} else {
- mt76_clear(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_BLK);
- mt76_clear(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
- MT_WF_PHY_B1_PD_BLK);
+ mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(0),
+ MT_WF_PHY_PD_BLK(0));
+ mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(1),
+ MT_WF_PHY_PD_BLK(1));
}
- mt7615_mac_set_default_sensitivity(dev);
+ mt7615_mac_set_default_sensitivity(&dev->phy);
+ ext_phy = mt7615_ext_phy(dev);
+ if (ext_phy)
+ mt7615_mac_set_default_sensitivity(ext_phy);
+
dev->scs_en = enable;
out:
mutex_unlock(&dev->mt76.mutex);
}
-void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev)
+void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
{
- mt76_clear(dev, MT_WF_PHY_R0_B0_PHYMUX_5, GENMASK(22, 20));
- mt76_set(dev, MT_WF_PHY_R0_B0_PHYMUX_5, BIT(22) | BIT(20));
+ u32 rxtd;
+
+ if (ext_phy)
+ rxtd = MT_WF_PHY_RXTD2(10);
+ else
+ rxtd = MT_WF_PHY_RXTD(12);
+
+ mt76_set(dev, rxtd, BIT(18) | BIT(29));
+ mt76_set(dev, MT_WF_PHY_R0_PHYMUX_5(ext_phy), 0x5 << 12);
+}
+
+void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);
+
+ mt76_clear(dev, reg, GENMASK(22, 20));
+ mt76_set(dev, reg, BIT(22) | BIT(20));
}
static void
-mt7615_mac_adjust_sensitivity(struct mt7615_dev *dev,
+mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
u32 rts_err_rate, bool ofdm)
{
- int false_cca = ofdm ? dev->false_cca_ofdm : dev->false_cca_cck;
+ struct mt7615_dev *dev = phy->dev;
+ int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
+ bool ext_phy = phy != &dev->phy;
u16 def_th = ofdm ? -98 : -110;
bool update = false;
s8 *sensitivity;
int signal;
- sensitivity = ofdm ? &dev->ofdm_sensitivity : &dev->cck_sensitivity;
- signal = mt76_get_min_avg_rssi(&dev->mt76);
+ sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity;
+ signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy);
if (!signal) {
- mt7615_mac_set_default_sensitivity(dev);
+ mt7615_mac_set_default_sensitivity(phy);
return;
}
@@ -1303,99 +1624,156 @@ mt7615_mac_adjust_sensitivity(struct mt7615_dev *dev,
u16 val;
if (ofdm) {
- /* DBDC not supported */
val = *sensitivity * 2 + 512;
- mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_OFDM_MASK,
- MT_WF_PHY_B0_PD_OFDM(val));
+ mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
+ MT_WF_PHY_PD_OFDM_MASK(ext_phy),
+ MT_WF_PHY_PD_OFDM(ext_phy, val));
} else {
val = *sensitivity + 256;
- mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
- MT_WF_PHY_B0_PD_CCK_MASK,
- MT_WF_PHY_B0_PD_CCK(val));
- mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
- MT_WF_PHY_B1_PD_CCK_MASK,
- MT_WF_PHY_B1_PD_CCK(val));
+ if (!ext_phy)
+ mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
+ MT_WF_PHY_PD_CCK_MASK(ext_phy),
+ MT_WF_PHY_PD_CCK(ext_phy, val));
}
- dev->last_cca_adj = jiffies;
+ phy->last_cca_adj = jiffies;
}
}
static void
-mt7615_mac_scs_check(struct mt7615_dev *dev)
+mt7615_mac_scs_check(struct mt7615_phy *phy)
{
- u32 val, rts_cnt = 0, rts_retries_cnt = 0, rts_err_rate = 0;
+ struct mt7615_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ u32 val, rts_err_rate = 0;
u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
- int i;
+ bool ext_phy = phy != &dev->phy;
if (!dev->scs_en)
return;
- for (i = 0; i < 4; i++) {
- u32 data;
-
- val = mt76_rr(dev, MT_MIB_MB_SDR0(i));
- data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
- if (data > rts_retries_cnt) {
- rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
- rts_retries_cnt = data;
- }
- }
-
- val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS0);
+ val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);
- val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS5);
+ val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);
- dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
- dev->false_cca_cck = pd_cck - mdrdy_cck;
- mt7615_mac_cca_stats_reset(dev);
+ phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
+ phy->false_cca_cck = pd_cck - mdrdy_cck;
+ mt7615_mac_cca_stats_reset(phy);
- if (rts_cnt + rts_retries_cnt)
- rts_err_rate = MT_FRAC(rts_retries_cnt,
- rts_cnt + rts_retries_cnt);
+ if (mib->rts_cnt + mib->rts_retries_cnt)
+ rts_err_rate = MT_FRAC(mib->rts_retries_cnt,
+ mib->rts_cnt + mib->rts_retries_cnt);
/* cck */
- mt7615_mac_adjust_sensitivity(dev, rts_err_rate, false);
+ mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false);
/* ofdm */
- mt7615_mac_adjust_sensitivity(dev, rts_err_rate, true);
+ mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true);
- if (time_after(jiffies, dev->last_cca_adj + 10 * HZ))
- mt7615_mac_set_default_sensitivity(dev);
+ if (time_after(jiffies, phy->last_cca_adj + 10 * HZ))
+ mt7615_mac_set_default_sensitivity(phy);
}
-void mt7615_update_channel(struct mt76_dev *mdev)
+static u8
+mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
+ u32 reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);
+ u32 val, sum = 0, n = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
+ val = mt76_rr(dev, reg);
+ sum += val * nf_power[i];
+ n += val;
+ }
+
+ if (!n)
+ return 0;
+
+ return sum / n;
+}
+
+static void
+mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
+{
+ struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
+ struct mt7615_phy *phy = mphy->priv;
struct mt76_channel_state *state;
u64 busy_time, tx_time, rx_time, obss_time;
+ u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
+ int nf;
- /* TODO: add DBDC support */
- busy_time = mt76_get_field(dev, MT_MIB_SDR9(0),
+ busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
MT_MIB_SDR9_BUSY_MASK);
- tx_time = mt76_get_field(dev, MT_MIB_SDR36(0),
+ tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
MT_MIB_SDR36_TXTIME_MASK);
- rx_time = mt76_get_field(dev, MT_MIB_SDR37(0),
+ rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
MT_MIB_SDR37_RXTIME_MASK);
- obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_TIME5,
- MT_MIB_OBSSTIME_MASK);
+ obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);
+
+ nf = mt7615_phy_get_nf(dev, idx);
+ if (!phy->noise)
+ phy->noise = nf << 4;
+ else if (nf)
+ phy->noise += nf - (phy->noise >> 4);
- state = mdev->chan_state;
+ state = mphy->chan_state;
state->cc_busy += busy_time;
state->cc_tx += tx_time;
state->cc_rx += rx_time + obss_time;
state->cc_bss_rx += rx_time;
+ state->noise = -(phy->noise >> 4);
+}
+
+void mt7615_update_channel(struct mt76_dev *mdev)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_phy_update_channel(&mdev->phy, 0);
+ if (mdev->phy2)
+ mt7615_phy_update_channel(mdev->phy2, 1);
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
+static void
+mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ bool ext_phy = phy != &dev->phy;
+ int i;
+
+ memset(mib, 0, sizeof(*mib));
+
+ mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+ MT_MIB_SDR3_FCS_ERR_MASK);
+
+ for (i = 0; i < 4; i++) {
+ u32 data, val, val2;
+
+ val = mt76_get_field(dev, MT_MIB_MB_SDR1(ext_phy, i),
+ MT_MIB_ACK_FAIL_COUNT_MASK);
+ if (val > mib->ack_fail_cnt)
+ mib->ack_fail_cnt = val;
+
+ val2 = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
+ data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val2);
+ if (data > mib->rts_retries_cnt) {
+ mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val2);
+ mib->rts_retries_cnt = data;
+ }
+ }
+}
+
void mt7615_mac_work(struct work_struct *work)
{
struct mt7615_dev *dev;
+ struct mt7615_phy *ext_phy;
int i, idx;
dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
@@ -1404,7 +1782,15 @@ void mt7615_mac_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
mt76_update_survey(&dev->mt76);
if (++dev->mac_work_count == 5) {
- mt7615_mac_scs_check(dev);
+ ext_phy = mt7615_ext_phy(dev);
+
+ mt7615_mac_update_mib_stats(&dev->phy);
+ mt7615_mac_scs_check(&dev->phy);
+ if (ext_phy) {
+ mt7615_mac_update_mib_stats(ext_phy);
+ mt7615_mac_scs_check(ext_phy);
+ }
+
dev->mac_work_count = 0;
}
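
The watchdog above surveys every tick but defers the heavier MIB refresh and SCS pass to every fifth tick, now once per phy. The counter logic in isolation:

    #include <stdbool.h>

    /* True on every fifth call; mirrors mac_work_count in the hunk above. */
    static bool every_fifth_tick(int *count)
    {
        if (++(*count) == 5) {
            *count = 0;
            return true;
        }
        return false;
    }
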
@@ -1421,21 +1807,139 @@ void mt7615_mac_work(struct work_struct *work)
MT7615_WATCHDOG_TIME);
}
-int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev)
+static bool
+mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
- int err;
+ bool ret;
- err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD0,
- MT_RX_SEL0, 0);
- if (err < 0)
- return err;
+ ret = wait_event_timeout(dev->reset_wait,
+ (READ_ONCE(dev->reset_state) & state),
+ MT7615_RESET_TIMEOUT);
+ WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
+ return ret;
+}
- if (chandef->width == NL80211_CHAN_WIDTH_160 ||
- chandef->width == NL80211_CHAN_WIDTH_80P80)
- err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD1,
- MT_RX_SEL0, 0);
- return err;
+static void
+mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ieee80211_hw *hw = priv;
+
+ mt7615_mcu_set_bcn(hw, vif, vif->bss_conf.enable_beacon);
+}
+
+static void
+mt7615_update_beacons(struct mt7615_dev *dev)
+{
+ ieee80211_iterate_active_interfaces(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_update_vif_beacon, dev->mt76.hw);
+
+ if (!dev->mt76.phy2)
+ return;
+
+ ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_update_vif_beacon, dev->mt76.phy2->hw);
+}
+
+static void
+mt7615_dma_reset(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+ usleep_range(1000, 2000);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, i, true);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
+ mt76_queue_rx_reset(dev, i);
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+}
+
+void mt7615_mac_reset_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev;
+
+ dev = container_of(work, struct mt7615_dev, reset_work);
+
+ if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
+ return;
+
+ ieee80211_stop_queues(mt76_hw(dev));
+ if (dev->mt76.phy2)
+ ieee80211_stop_queues(dev->mt76.phy2->hw);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mphy);
+ if (dev->mt76.phy2)
+ mt76_txq_schedule_all(dev->mt76.phy2);
+
+ tasklet_disable(&dev->mt76.tx_tasklet);
+ napi_disable(&dev->mt76.napi[0]);
+ napi_disable(&dev->mt76.napi[1]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);
+
+ if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ mt7615_dma_reset(dev);
+
+ mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_INIT);
+ mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+ }
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ tasklet_enable(&dev->mt76.tx_tasklet);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+
+ napi_enable(&dev->mt76.napi[0]);
+ napi_schedule(&dev->mt76.napi[0]);
+
+ napi_enable(&dev->mt76.napi[1]);
+ napi_schedule(&dev->mt76.napi[1]);
+
+ ieee80211_wake_queues(mt76_hw(dev));
+ if (dev->mt76.phy2)
+ ieee80211_wake_queues(dev->mt76.phy2->hw);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt7615_update_beacons(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT7615_WATCHDOG_TIME);
+}
+
+static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+
+ if (phy->rdd_state & BIT(0))
+ mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ if (phy->rdd_state & BIT(1))
+ mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
}
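
rdd_state, introduced above, is a per-phy bitmask of the radar-detector chains that were actually started, so the stop path only issues RDD_STOP for chains it owns. The same logic with the MCU call abstracted away:

    #include <stdint.h>

    /* Chain i is active iff bit i of rdd_state is set. */
    static void stop_active_chains(uint8_t rdd_state, void (*stop)(int chain))
    {
        int i;

        for (i = 0; i < 2; i++)
            if (rdd_state & (1u << i))
                stop(i);
    }
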
static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
@@ -1450,61 +1954,112 @@ static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
MT_RX_SEL0, 1);
}
-int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev)
+static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
int err;
/* start CAC */
- err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, MT_HW_RDD0,
- MT_RX_SEL0, 0);
+ err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
if (err < 0)
return err;
- /* TODO: DBDC support */
-
- err = mt7615_dfs_start_rdd(dev, MT_HW_RDD0);
+ err = mt7615_dfs_start_rdd(dev, ext_phy);
if (err < 0)
return err;
+ phy->rdd_state |= BIT(ext_phy);
+
if (chandef->width == NL80211_CHAN_WIDTH_160 ||
chandef->width == NL80211_CHAN_WIDTH_80P80) {
- err = mt7615_dfs_start_rdd(dev, MT_HW_RDD1);
+ err = mt7615_dfs_start_rdd(dev, 1);
if (err < 0)
return err;
+
+ phy->rdd_state |= BIT(1);
}
return 0;
}
-int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev)
+static int
+mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
+{
+ const struct mt7615_dfs_radar_spec *radar_specs;
+ struct mt7615_dev *dev = phy->dev;
+ int err, i;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ radar_specs = &fcc_radar_specs;
+ err = mt7615_mcu_set_fcc5_lpn(dev, 8);
+ if (err < 0)
+ return err;
+ break;
+ case NL80211_DFS_ETSI:
+ radar_specs = &etsi_radar_specs;
+ break;
+ case NL80211_DFS_JP:
+ radar_specs = &jp_radar_specs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
+ err = mt7615_mcu_set_radar_th(dev, i,
+ &radar_specs->radar_pattern[i]);
+ if (err < 0)
+ return err;
+ }
+
+ return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
+}
+
+int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
int err;
- if (dev->mt76.region == NL80211_DFS_UNSET)
+ if (dev->mt76.region == NL80211_DFS_UNSET) {
+ phy->dfs_state = -1;
+ if (phy->rdd_state)
+ goto stop;
+
return 0;
+ }
- if (test_bit(MT76_SCANNING, &dev->mt76.state))
+ if (test_bit(MT76_SCANNING, &phy->mt76->state))
return 0;
- if (dev->dfs_state == chandef->chan->dfs_state)
+ if (phy->dfs_state == chandef->chan->dfs_state)
return 0;
- dev->dfs_state = chandef->chan->dfs_state;
+ err = mt7615_dfs_init_radar_specs(phy);
+ if (err < 0) {
+ phy->dfs_state = -1;
+ goto stop;
+ }
+
+ phy->dfs_state = chandef->chan->dfs_state;
if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7615_dfs_start_radar_detector(dev);
- else
- return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0,
- MT_RX_SEL0, 0);
- } else {
- err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START,
- MT_HW_RDD0, MT_RX_SEL0, 0);
- if (err < 0)
- return err;
+ return mt7615_dfs_start_radar_detector(phy);
- return mt7615_dfs_stop_radar_detector(dev);
+ return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
+ MT_RX_SEL0, 0);
}
+
+stop:
+ err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ mt7615_dfs_stop_radar_detector(phy);
+ return 0;
}
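
Condensed, the reworked mt7615_dfs_init_radar_detector() reduces to the decision table below. This is a sketch, not driver API; cur_state < 0 stands for the "unknown" value the error paths assign, and the stop action is only taken for an unset region when detectors are still running:

    enum dfs_action { DFS_NOOP, DFS_NORMAL_STOP, DFS_START_CAC, DFS_CAC_END };

    static enum dfs_action dfs_decide(int region_set, int scanning,
                                      int cur_state, int chan_state,
                                      int chan_is_radar, int chan_available)
    {
        if (!region_set)
            return DFS_NORMAL_STOP;
        if (scanning || cur_state == chan_state)
            return DFS_NOOP;
        if (!chan_is_radar)
            return DFS_NORMAL_STOP;
        return chan_available ? DFS_CAC_END : DFS_START_CAC;
    }
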
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
index 38695d4f92e2..6fa7e3dd6a3a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -103,6 +103,11 @@ enum rx_pkt_type {
#define MT_RXV4_RCPI1 GENMASK(15, 8)
#define MT_RXV4_RCPI0 GENMASK(7, 0)
+#define MT_RXV6_NF3 GENMASK(31, 24)
+#define MT_RXV6_NF2 GENMASK(23, 16)
+#define MT_RXV6_NF1 GENMASK(15, 8)
+#define MT_RXV6_NF0 GENMASK(7, 0)
+
enum tx_header_format {
MT_HDR_FORMAT_802_3,
MT_HDR_FORMAT_CMD,
@@ -126,6 +131,10 @@ enum tx_pkt_queue_idx {
MT_LMAC_BMC0,
MT_LMAC_BCN0,
MT_LMAC_PSMP0,
+ MT_LMAC_ALTX1,
+ MT_LMAC_BMC1,
+ MT_LMAC_BCN1,
+ MT_LMAC_PSMP1,
};
enum tx_port_idx {
@@ -229,8 +238,27 @@ enum tx_phy_bandwidth {
#define MT_TX_RATE_IDX GENMASK(5, 0)
#define MT_TXP_MAX_BUF_NUM 6
+#define MT_HW_TXP_MAX_MSDU_NUM 4
+#define MT_HW_TXP_MAX_BUF_NUM 4
+
+#define MT_MSDU_ID_VALID BIT(15)
+
+#define MT_TXD_LEN_MSDU_LAST BIT(14)
+#define MT_TXD_LEN_AMSDU_LAST BIT(15)
+
+struct mt7615_txp_ptr {
+ __le32 buf0;
+ __le16 len0;
+ __le16 len1;
+ __le32 buf1;
+} __packed __aligned(4);
-struct mt7615_txp {
+struct mt7615_hw_txp {
+ __le16 msdu_id[MT_HW_TXP_MAX_MSDU_NUM];
+ struct mt7615_txp_ptr ptr[MT_HW_TXP_MAX_BUF_NUM / 2];
+} __packed __aligned(4);
+
+struct mt7615_fw_txp {
__le16 flags;
__le16 token;
u8 bss_idx;
@@ -239,7 +267,14 @@ struct mt7615_txp {
u8 nbuf;
__le32 buf[MT_TXP_MAX_BUF_NUM];
__le16 len[MT_TXP_MAX_BUF_NUM];
-} __packed;
+} __packed __aligned(4);
+
+struct mt7615_txp_common {
+ union {
+ struct mt7615_fw_txp fw;
+ struct mt7615_hw_txp hw;
+ };
+};
struct mt7615_tx_free {
__le16 rx_byte_cnt;
@@ -247,7 +282,7 @@ struct mt7615_tx_free {
u8 txd_cnt;
u8 rsv[3];
__le16 token[];
-} __packed;
+} __packed __aligned(4);
#define MT_TX_FREE_MSDU_ID_CNT GENMASK(6, 0)
@@ -302,6 +337,38 @@ struct mt7615_tx_free {
#define MT_TXS6_F1_RCPI_1 GENMASK(15, 8)
#define MT_TXS6_F1_RCPI_0 GENMASK(7, 0)
+struct mt7615_dfs_pulse {
+ u32 max_width; /* us */
+ int max_pwr; /* dBm */
+ int min_pwr; /* dBm */
+ u32 min_stgr_pri; /* us */
+ u32 max_stgr_pri; /* us */
+ u32 min_cr_pri; /* us */
+ u32 max_cr_pri; /* us */
+};
+
+struct mt7615_dfs_pattern {
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ u8 max_pw;
+ u32 min_pri;
+ u32 max_pri;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+};
+
+struct mt7615_dfs_radar_spec {
+ struct mt7615_dfs_pulse pulse_th;
+ struct mt7615_dfs_pattern radar_pattern[16];
+};
+
enum mt7615_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
@@ -317,7 +384,7 @@ enum mt7615_cipher_type {
MT_CIPHER_GCMP_256,
};
-static inline struct mt7615_txp *
+static inline struct mt7615_txp_common *
mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
u8 *txwi;
@@ -327,7 +394,7 @@ mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
txwi = mt76_get_txwi_ptr(dev, t);
- return (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
+ return (struct mt7615_txp_common *)(txwi + MT_TXD_SIZE);
}
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 070b03403894..01194ed79869 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -11,27 +11,85 @@
#include <linux/pci.h>
#include <linux/module.h>
#include "mt7615.h"
+#include "mcu.h"
+
+static bool mt7615_dev_running(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return true;
+
+ phy = mt7615_ext_phy(dev);
+
+ return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+}
static int mt7615_start(struct ieee80211_hw *hw)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool running;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return -EIO;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ running = mt7615_dev_running(dev);
+
+ if (!running) {
+ mt7615_mcu_ctrl_pm_state(dev, 0, 0);
+ mt7615_mcu_set_mac_enable(dev, 0, true);
+ mt7615_mac_enable_nf(dev, 0);
+ }
+
+ if (phy != &dev->phy) {
+ mt7615_mcu_ctrl_pm_state(dev, 1, 0);
+ mt7615_mcu_set_mac_enable(dev, 1, true);
+ mt7615_mac_enable_nf(dev, 1);
+ }
+
+ mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (running)
+ goto out;
mt7615_mac_reset_counters(dev);
- dev->mt76.survey_time = ktime_get_boottime();
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT7615_WATCHDOG_TIME);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
return 0;
}
static void mt7615_stop(struct ieee80211_hw *hw)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
- cancel_delayed_work_sync(&dev->mt76.mac_work);
+ mutex_lock(&dev->mt76.mutex);
+
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (phy != &dev->phy) {
+ mt7615_mcu_ctrl_pm_state(dev, 1, 1);
+ mt7615_mcu_set_mac_enable(dev, 1, false);
+ }
+
+ if (!mt7615_dev_running(dev)) {
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+
+ mt7615_mcu_ctrl_pm_state(dev, 0, 1);
+ mt7615_mcu_set_mac_enable(dev, 0, false);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
}
static int get_omac_idx(enum nl80211_iftype type, u32 mask)
@@ -39,6 +97,7 @@ static int get_omac_idx(enum nl80211_iftype type, u32 mask)
int i;
switch (type) {
+ case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
@@ -70,8 +129,10 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt76_txq *mtxq;
+ bool ext_phy = phy != &dev->phy;
int idx, ret = 0;
mutex_lock(&dev->mt76.mutex);
@@ -89,9 +150,12 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
}
mvif->omac_idx = idx;
- /* TODO: DBDC support. Use band 0 for now */
- mvif->band_idx = 0;
- mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
+ mvif->band_idx = ext_phy;
+ if (mt7615_ext_phy(dev))
+ mvif->wmm_idx = ext_phy * (MT7615_MAX_WMM_SETS / 2) +
+ mvif->idx % (MT7615_MAX_WMM_SETS / 2);
+ else
+ mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
ret = mt7615_mcu_set_dev_info(dev, vif, 1);
if (ret)
@@ -99,18 +163,25 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
dev->vif_mask |= BIT(mvif->idx);
dev->omac_mask |= BIT(mvif->omac_idx);
+ phy->omac_mask |= BIT(mvif->omac_idx);
+
+ mt7615_mcu_set_dbdc(dev);
+
idx = MT7615_WTBL_RESERVED - mvif->idx;
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.ext_phy = mvif->band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
- mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->sta.wcid;
- mt76_txq_init(&dev->mt76, vif->txq);
+ if (vif->txq) {
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->sta.wcid;
+ mt76_txq_init(&dev->mt76, vif->txq);
+ }
out:
mutex_unlock(&dev->mt76.mutex);
@@ -123,7 +194,8 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = &mvif->sta;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
int idx = msta->wcid.idx;
/* TODO: disable beacon for the bss */
@@ -131,11 +203,13 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mt7615_mcu_set_dev_info(dev, vif, 0);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- mt76_txq_remove(&dev->mt76, vif->txq);
+ if (vif->txq)
+ mt76_txq_remove(&dev->mt76, vif->txq);
mutex_lock(&dev->mt76.mutex);
dev->vif_mask &= ~BIT(mvif->idx);
dev->omac_mask &= ~BIT(mvif->omac_idx);
+ phy->omac_mask &= ~BIT(mvif->omac_idx);
mutex_unlock(&dev->mt76.mutex);
spin_lock_bh(&dev->sta_poll_lock);
@@ -144,34 +218,38 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&dev->sta_poll_lock);
}
-static int mt7615_set_channel(struct mt7615_dev *dev)
+static int mt7615_set_channel(struct mt7615_phy *phy)
{
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
int ret;
cancel_delayed_work_sync(&dev->mt76.mac_work);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &phy->mt76->state);
- mt7615_dfs_check_channel(dev);
+ phy->dfs_state = -1;
+ mt76_set_channel(phy->mt76);
- mt76_set_channel(&dev->mt76);
-
- ret = mt7615_mcu_set_channel(dev);
+ ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
if (ret)
goto out;
- ret = mt7615_dfs_init_radar_detector(dev);
- mt7615_mac_cca_stats_reset(dev);
- dev->mt76.survey_time = ktime_get_boottime();
+ mt7615_mac_set_timing(phy);
+ ret = mt7615_dfs_init_radar_detector(phy);
+ mt7615_mac_cca_stats_reset(phy);
+ mt7615_mcu_set_sku_en(phy, true);
mt7615_mac_reset_counters(dev);
+ phy->noise = 0;
+ phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy));
out:
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &phy->mt76->state);
mutex_unlock(&dev->mt76.mutex);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(phy->mt76);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT7615_WATCHDOG_TIME);
return ret;
@@ -181,7 +259,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
&mvif->sta;
@@ -230,27 +308,27 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool band = phy != &dev->phy;
int ret = 0;
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER)) {
ieee80211_stop_queues(hw);
- ret = mt7615_set_channel(dev);
+ ret = mt7615_set_channel(phy);
ieee80211_wake_queues(hw);
}
mutex_lock(&dev->mt76.mutex);
- if (changed & IEEE80211_CONF_CHANGE_POWER)
- ret = mt7615_mcu_set_tx_power(dev);
-
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
- dev->mt76.rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
else
- dev->mt76.rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
- mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
}
mutex_unlock(&dev->mt76.mutex);
@@ -263,7 +341,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS;
@@ -275,7 +353,10 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags,
u64 multicast)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool band = phy != &dev->phy;
+
u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
MT_WF_RFCR1_DROP_BF_POLL |
MT_WF_RFCR1_DROP_BA |
@@ -285,21 +366,21 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
- dev->mt76.rxfilter &= ~(_hw); \
- dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ phy->rxfilter &= ~(_hw); \
+ phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
- dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
- MT_WF_RFCR_DROP_OTHER_BEACON |
- MT_WF_RFCR_DROP_FRAME_REPORT |
- MT_WF_RFCR_DROP_PROBEREQ |
- MT_WF_RFCR_DROP_MCAST_FILTERED |
- MT_WF_RFCR_DROP_MCAST |
- MT_WF_RFCR_DROP_BCAST |
- MT_WF_RFCR_DROP_DUPLICATE |
- MT_WF_RFCR_DROP_A2_BSSID |
- MT_WF_RFCR_DROP_UNWANTED_CTL |
- MT_WF_RFCR_DROP_STBC_MULTI);
+ phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
MT_WF_RFCR_DROP_A3_MAC |
@@ -313,12 +394,12 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
MT_WF_RFCR_DROP_NDPA);
*total_flags = flags;
- mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
if (*total_flags & FIF_CONTROL)
- mt76_clear(dev, MT_WF_RFCR1, ctl_flags);
+ mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
else
- mt76_set(dev, MT_WF_RFCR1, ctl_flags);
+ mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
}
static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
@@ -326,24 +407,32 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *info,
u32 changed)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_ASSOC)
mt7615_mcu_set_bss_info(dev, vif, info->assoc);
- /* TODO: update beacon content
- * BSS_CHANGED_BEACON
- */
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+
+ if (slottime != phy->slottime) {
+ phy->slottime = slottime;
+ mt7615_mac_set_timing(phy);
+ }
+ }
if (changed & BSS_CHANGED_BEACON_ENABLED) {
mt7615_mcu_set_bss_info(dev, vif, info->enable_beacon);
- mt7615_mcu_wtbl_bmc(dev, vif, info->enable_beacon);
- mt7615_mcu_set_sta_rec_bmc(dev, vif, info->enable_beacon);
- mt7615_mcu_set_bcn(dev, vif, info->enable_beacon);
+ mt7615_mcu_set_bmc(dev, vif, info->enable_beacon);
}
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED))
+ mt7615_mcu_set_bcn(hw, vif, info->enable_beacon);
+
mutex_unlock(&dev->mt76.mutex);
}
@@ -352,15 +441,15 @@ mt7615_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7615_mcu_set_bcn(dev, vif, true);
+ mt7615_mcu_set_bcn(hw, vif, true);
mutex_unlock(&dev->mt76.mutex);
}
-int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
@@ -375,33 +464,23 @@ int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
+ msta->wcid.ext_phy = mvif->band_idx;
+
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- mt7615_mcu_add_wtbl(dev, vif, sta);
- mt7615_mcu_set_sta_rec(dev, vif, sta, 1);
+ mt7615_mcu_set_sta(dev, vif, sta, 1);
return 0;
}
-void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-
- if (sta->ht_cap.ht_supported)
- mt7615_mcu_set_ht_cap(dev, vif, sta);
-}
-
-void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
- mt7615_mcu_del_wtbl(dev, sta);
-
+ mt7615_mcu_set_sta(dev, vif, sta, 0);
mt7615_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -415,7 +494,8 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
@@ -430,7 +510,7 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
break;
}
msta->n_rates = i;
- mt7615_mac_set_rates(dev, msta, NULL, msta->rates);
+ mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
msta->rate_probe = false;
spin_unlock_bh(&dev->mt76.lock);
}
@@ -439,7 +519,8 @@ static void mt7615_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
@@ -458,15 +539,16 @@ static void mt7615_tx(struct ieee80211_hw *hw,
wcid = &mvif->sta.wcid;
}
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
+ mt76_tx(mphy, control->sta, wcid, skb);
}
static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
mutex_lock(&dev->mt76.mutex);
- mt7615_mcu_set_rts_thresh(dev, val);
+ mt7615_mcu_set_rts_thresh(phy, val);
mutex_unlock(&dev->mt76.mutex);
return 0;
@@ -477,7 +559,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
enum ieee80211_ampdu_mlme_action action = params->action;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct ieee80211_sta *sta = params->sta;
struct ieee80211_txq *txq = sta->txq[params->tid];
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
@@ -527,6 +609,92 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return ret;
}
+static int
+mt7615_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
+ IEEE80211_STA_NONE);
+}
+
+static int
+mt7615_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
+ IEEE80211_STA_NOTEXIST);
+}
+
+static int
+mt7615_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mib_stats *mib = &phy->mib;
+
+ stats->dot11RTSSuccessCount = mib->rts_cnt;
+ stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+
+ return 0;
+}
+
+static u64
+mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
+ tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return tsf.t64;
+}
+
+static void
+mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+
+ phy->coverage_class = max_t(s16, coverage_class, 0);
+ mt7615_mac_set_timing(phy);
+}
+
+static int
+mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int max_nss = hweight8(hw->wiphy->available_antennas_tx);
+ bool ext_phy = phy != &dev->phy;
+
+ if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
+ return -EINVAL;
+
+ if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
+ tx_ant = BIT(ffs(tx_ant) - 1) - 1;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ phy->mt76->antenna_mask = tx_ant;
+ phy->chainmask = ext_phy ? tx_ant << 2 : tx_ant;
+
+ mt76_set_stream_caps(&dev->mt76, true);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
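
The mask check in mt7615_set_antenna() above accepts only antenna masks that are contiguous from bit 0 (0x1, 0x3, 0x7, 0xf) and collapses anything else to the bits below its lowest set bit, e.g. 0x6 becomes 0x1. Standalone, with the caller expected to reject a zero mask first, as the driver does:

    #include <stdint.h>

    static unsigned int popcount8(uint8_t v)
    {
        unsigned int n = 0;

        for (; v; v >>= 1)
            n += v & 1;
        return n;
    }

    static unsigned int lowest_set(uint8_t v)    /* v must be non-zero */
    {
        unsigned int i = 0;

        for (; !(v & 1); v >>= 1)
            i++;
        return i;
    }

    static uint8_t normalize_ant_mask(uint8_t tx_ant)
    {
        if ((uint8_t)((1u << popcount8(tx_ant)) - 1) != tx_ant)
            tx_ant = (1u << lowest_set(tx_ant)) - 1;
        return tx_ant;
    }
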
+
const struct ieee80211_ops mt7615_ops = {
.tx = mt7615_tx,
.start = mt7615_start,
@@ -537,7 +705,9 @@ const struct ieee80211_ops mt7615_ops = {
.conf_tx = mt7615_conf_tx,
.configure_filter = mt7615_configure_filter,
.bss_info_changed = mt7615_bss_info_changed,
- .sta_state = mt76_sta_state,
+ .sta_add = mt7615_sta_add,
+ .sta_remove = mt7615_sta_remove,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7615_set_key,
.ampdu_action = mt7615_ampdu_action,
.set_rts_threshold = mt7615_set_rts_threshold,
@@ -548,6 +718,38 @@ const struct ieee80211_ops mt7615_ops = {
.release_buffered_frames = mt76_release_buffered_frames,
.get_txpower = mt76_get_txpower,
.channel_switch_beacon = mt7615_channel_switch_beacon,
+ .get_stats = mt7615_get_stats,
+ .get_tsf = mt7615_get_tsf,
.get_survey = mt76_get_survey,
.get_antenna = mt76_get_antenna,
+ .set_antenna = mt7615_set_antenna,
+ .set_coverage_class = mt7615_set_coverage_class,
};
+
+static int __init mt7615_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7615_pci_driver);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_MT7622_WMAC)) {
+ ret = platform_driver_register(&mt7622_wmac_driver);
+ if (ret)
+ pci_unregister_driver(&mt7615_pci_driver);
+ }
+
+ return ret;
+}
+
+static void __exit mt7615_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MT7622_WMAC))
+ platform_driver_unregister(&mt7622_wmac_driver);
+ pci_unregister_driver(&mt7615_pci_driver);
+}
+
+module_init(mt7615_init);
+module_exit(mt7615_exit);
+MODULE_LICENSE("Dual BSD/GPL");
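
mt7615_init() above registers the PCI driver first and, only when CONFIG_MT7622_WMAC is enabled, the platform driver second, rolling the first registration back if the second fails so a half-registered module never stays loaded. The unwind pattern in isolation (names illustrative):

    /* Register a, then b; if b fails, roll a back and report b's error. */
    static int register_pair(int (*reg_a)(void), void (*unreg_a)(void),
                             int (*reg_b)(void))
    {
        int ret = reg_a();

        if (ret)
            return ret;
        ret = reg_b();
        if (ret)
            unreg_a();
        return ret;
    }
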
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index f229c9ce9f65..7218a3041ead 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -29,7 +29,8 @@ struct mt7615_fw_trailer {
__le32 len;
} __packed;
-#define MCU_PATCH_ADDRESS 0x80000
+#define MT7615_PATCH_ADDRESS 0x80000
+#define MT7622_PATCH_ADDRESS 0x9c000
#define N9_REGION_NUM 2
#define CR4_REGION_NUM 1
@@ -57,9 +58,9 @@ static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
u32 val;
__le32 *txd;
- seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (!seq)
- seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
mcu_txd = (struct mt7615_mcu_txd *)skb_push(skb,
sizeof(struct mt7615_mcu_txd));
@@ -104,7 +105,7 @@ static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
if (wait_seq)
*wait_seq = seq;
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state))
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
qid = MT_TXQ_MCU;
else
qid = MT_TXQ_FWDL;
@@ -144,7 +145,7 @@ mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int len, bool wait_resp)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- unsigned long expires = jiffies + 10 * HZ;
+ unsigned long expires = jiffies + 20 * HZ;
struct sk_buff *skb;
int ret, seq;
@@ -152,7 +153,7 @@ mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
if (!skb)
return -ENOMEM;
- mutex_lock(&mdev->mmio.mcu.mutex);
+ mutex_lock(&mdev->mcu.mutex);
ret = __mt7615_mcu_msg_send(dev, skb, cmd, &seq);
if (ret)
@@ -173,7 +174,7 @@ mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
}
out:
- mutex_unlock(&mdev->mmio.mcu.mutex);
+ mutex_unlock(&mdev->mcu.mutex);
return ret;
}
@@ -186,20 +187,59 @@ mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
}
static void
+mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7615_mcu_rdd_report *r;
+
+ r = (struct mt7615_mcu_rdd_report *)skb->data;
+
+ if (r->idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ ieee80211_radar_detected(mphy->hw);
+ dev->hw_pattern++;
+}
+
+static void
+mt7615_mcu_rx_log_message(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+ const char *data = (char *)&rxd[1];
+ const char *type;
+
+ switch (rxd->s2d_index) {
+ case 0:
+ type = "N9";
+ break;
+ case 2:
+ type = "CR4";
+ break;
+ default:
+ type = "unknown";
+ break;
+ }
+
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data);
+}
+
+static void
mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
switch (rxd->ext_eid) {
case MCU_EXT_EVENT_RDD_REPORT:
- ieee80211_radar_detected(dev->mt76.hw);
- dev->hw_pattern++;
+ mt7615_mcu_rx_radar_detected(dev, skb);
break;
case MCU_EXT_EVENT_CSA_NOTIFY:
ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7615_mcu_csa_finish, dev);
break;
+ case MCU_EXT_EVENT_FW_LOG_2_HOST:
+ mt7615_mcu_rx_log_message(dev, skb);
+ break;
default:
break;
}
@@ -319,19 +359,50 @@ static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
&req, sizeof(req), true);
}
+static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
+{
+ if (!is_mt7622(&dev->mt76))
+ return;
+
+ regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
+ MT_INFRACFG_MISC_AP2CONN_WAKE,
+ !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
+}
+
static int mt7615_driver_own(struct mt7615_dev *dev)
{
mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_DRV_OWN);
+
+ mt7622_trigger_hif_int(dev, true);
if (!mt76_poll_msec(dev, MT_CFG_LPCR_HOST,
- MT_CFG_LPCR_HOST_FW_OWN, 0, 500)) {
+ MT_CFG_LPCR_HOST_FW_OWN, 0, 3000)) {
dev_err(dev->mt76.dev, "Timeout for driver own\n");
return -EIO;
}
+ mt7622_trigger_hif_int(dev, false);
+
+ return 0;
+}
+
+static int mt7615_firmware_own(struct mt7615_dev *dev)
+{
+ mt7622_trigger_hif_int(dev, true);
+
+ mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_FW_OWN);
+
+ if (is_mt7622(&dev->mt76) &&
+ !mt76_poll_msec(dev, MT_CFG_LPCR_HOST,
+ MT_CFG_LPCR_HOST_FW_OWN,
+ MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
+ dev_err(dev->mt76.dev, "Timeout for firmware own\n");
+ return -EIO;
+ }
+ mt7622_trigger_hif_int(dev, false);
return 0;
}
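
Both ownership helpers above share one shape: wrap the LPCR ownership write and its acknowledgment poll in the MT7622 HIF wake toggle, with the poll budget raised to 3000 ms. A generic sketch of such a bounded poll (the sleep callback is a placeholder, not a kernel API):

    #include <stdbool.h>

    /* Poll 'done' roughly once per millisecond, up to timeout_ms. */
    static bool poll_until(bool (*done)(void), int timeout_ms,
                           void (*sleep_1ms)(void))
    {
        while (timeout_ms-- > 0) {
            if (done())
                return true;
            sleep_1ms();
        }
        return false;
    }
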
-static int mt7615_load_patch(struct mt7615_dev *dev)
+static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
{
const struct mt7615_patch_hdr *hdr;
const struct firmware *fw = NULL;
@@ -348,7 +419,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev)
return -EAGAIN;
}
- ret = request_firmware(&fw, MT7615_ROM_PATCH, dev->mt76.dev);
+ ret = request_firmware(&fw, name, dev->mt76.dev);
if (ret)
goto out;
@@ -365,8 +436,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev)
len = fw->size - sizeof(*hdr);
- ret = mt7615_mcu_init_download(dev, MCU_PATCH_ADDRESS, len,
- DL_MODE_NEED_RSP);
+ ret = mt7615_mcu_init_download(dev, addr, len, DL_MODE_NEED_RSP);
if (ret) {
dev_err(dev->mt76.dev, "Download request failed\n");
goto out;
@@ -444,13 +514,13 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
return 0;
}
-static int mt7615_load_ram(struct mt7615_dev *dev)
+static int mt7615_load_n9(struct mt7615_dev *dev, const char *name)
{
const struct mt7615_fw_trailer *hdr;
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, MT7615_FIRMWARE_N9, dev->mt76.dev);
+ ret = request_firmware(&fw, name, dev->mt76.dev);
if (ret)
return ret;
@@ -477,9 +547,27 @@ static int mt7615_load_ram(struct mt7615_dev *dev)
goto out;
}
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+ if (!strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver)))
+ dev->fw_ver = MT7615_FIRMWARE_V2;
+ else
+ dev->fw_ver = MT7615_FIRMWARE_V1;
+
+out:
release_firmware(fw);
+ return ret;
+}
+
+static int mt7615_load_cr4(struct mt7615_dev *dev, const char *name)
+{
+ const struct mt7615_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
- ret = request_firmware(&fw, MT7615_FIRMWARE_CR4, dev->mt76.dev);
+ ret = request_firmware(&fw, name, dev->mt76.dev);
if (ret)
return ret;
@@ -500,8 +588,10 @@ static int mt7615_load_ram(struct mt7615_dev *dev)
goto out;
ret = mt7615_mcu_start_firmware(dev, 0, FW_START_WORKING_PDA_CR4);
- if (ret)
+ if (ret) {
dev_err(dev->mt76.dev, "Failed to start CR4 firmware\n");
+ goto out;
+ }
out:
release_firmware(fw);
@@ -509,6 +599,17 @@ out:
return ret;
}
+static int mt7615_load_ram(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt7615_load_n9(dev, MT7615_FIRMWARE_N9);
+ if (ret)
+ return ret;
+
+ return mt7615_load_cr4(dev, MT7615_FIRMWARE_CR4);
+}
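/* MT7615 boots in two stages, the N9 image first and then the CR4 image.
 * The N9 trailer doubles as a feature probe: an N9 reporting firmware
 * version "2.0" selects the V2 message format (a single STA_REC_UPDATE
 * carrying an embedded WTBL TLV), anything else falls back to V1. */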
+
static int mt7615_load_firmware(struct mt7615_dev *dev)
{
int ret;
@@ -521,7 +622,7 @@ static int mt7615_load_firmware(struct mt7615_dev *dev)
return -EIO;
}
- ret = mt7615_load_patch(dev);
+ ret = mt7615_load_patch(dev, MT7615_PATCH_ADDRESS, MT7615_ROM_PATCH);
if (ret)
return ret;
@@ -536,13 +637,55 @@ static int mt7615_load_firmware(struct mt7615_dev *dev)
return -EIO;
}
- mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+ return 0;
+}
- dev_dbg(dev->mt76.dev, "Firmware init done\n");
+static int mt7622_load_firmware(struct mt7615_dev *dev)
+{
+ int ret;
+ u32 val;
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ val = mt76_get_field(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE);
+ if (val != FW_STATE_FW_DOWNLOAD) {
+ dev_err(dev->mt76.dev, "Firmware is not ready for download\n");
+ return -EIO;
+ }
+
+ ret = mt7615_load_patch(dev, MT7622_PATCH_ADDRESS, MT7622_ROM_PATCH);
+ if (ret)
+ return ret;
+
+ ret = mt7615_load_n9(dev, MT7622_FIRMWARE_N9);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE,
+ FIELD_PREP(MT_TOP_OFF_RSV_FW_STATE,
+ FW_STATE_NORMAL_TRX), 1500)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
return 0;
}
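/* MT7622 carries only an N9, so its loader skips the CR4 stage; the TX
 * scheduler is bypassed while the download rings are in use, and the
 * firmware state is polled from MT_TOP_OFF_RSV rather than the
 * MT_TOP_MISC2 register used on MT7615. */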
+int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl)
+{
+ struct {
+ u8 ctrl_val;
+ u8 pad[3];
+ } data = {
+ .ctrl_val = ctrl
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST,
+ &data, sizeof(data), true);
+}
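/* ctrl = 0 mutes firmware log forwarding to the host; mt7615_mcu_init()
 * turns it off by default, and the fw_debug debugfs knob re-arms it
 * (upstream uses ctrl = 2 for host forwarding). Messages that do arrive
 * are printed by mt7615_mcu_rx_log_message() above. */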
+
int mt7615_mcu_init(struct mt7615_dev *dev)
{
static const struct mt76_mcu_ops mt7615_mcu_ops = {
@@ -557,11 +700,17 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
if (ret)
return ret;
- ret = mt7615_load_firmware(dev);
+ if (is_mt7622(&dev->mt76))
+ ret = mt7622_load_firmware(dev);
+ else
+ ret = mt7615_load_firmware(dev);
if (ret)
return ret;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt7615_mcu_fw_log_2_host(dev, 0);
return 0;
}
@@ -569,8 +718,8 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
void mt7615_mcu_exit(struct mt7615_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_FW_OWN);
- skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
+ mt7615_firmware_own(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
}
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
@@ -578,21 +727,26 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
struct {
u8 buffer_mode;
u8 pad;
- u16 len;
+ __le16 len;
} __packed req_hdr = {
.buffer_mode = 1,
- .len = __MT_EE_MAX - MT_EE_NIC_CONF_0,
};
- int ret, len = sizeof(req_hdr) + __MT_EE_MAX - MT_EE_NIC_CONF_0;
+ int ret, len, eep_len;
u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
+ if (is_mt7622(&dev->mt76))
+ eep_len = MT7622_EE_MAX - MT_EE_NIC_CONF_0;
+ else
+ eep_len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
+
+ len = sizeof(req_hdr) + eep_len;
req = kzalloc(len, GFP_KERNEL);
if (!req)
return -ENOMEM;
+ req_hdr.len = cpu_to_le16(eep_len);
memcpy(req, &req_hdr, sizeof(req_hdr));
- memcpy(req + sizeof(req_hdr), eep + MT_EE_NIC_CONF_0,
- __MT_EE_MAX - MT_EE_NIC_CONF_0);
+ memcpy(req + sizeof(req_hdr), eep + MT_EE_NIC_CONF_0, eep_len);
ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
req, len, true);
@@ -601,23 +755,24 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
return ret;
}
-int mt7615_mcu_init_mac(struct mt7615_dev *dev)
+int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable)
{
struct {
u8 enable;
u8 band;
u8 rsv[2];
} __packed req = {
- .enable = 1,
- .band = 0,
+ .enable = enable,
+ .band = band,
};
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL,
&req, sizeof(req), true);
}
-int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val)
+int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val)
{
+ struct mt7615_dev *dev = phy->dev;
struct {
u8 prot_idx;
u8 band;
@@ -626,7 +781,7 @@ int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val)
__le32 pkt_thresh;
} __packed req = {
.prot_idx = 1,
- .band = 0,
+ .band = phy != &dev->phy,
.len_thresh = cpu_to_le32(val),
.pkt_thresh = cpu_to_le32(0x2),
};
@@ -672,7 +827,7 @@ int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
&req, sizeof(req), true);
}
-int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter)
+int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int enter)
{
#define ENTER_PM_STATE 1
#define EXIT_PM_STATE 2
@@ -695,13 +850,72 @@ int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter)
} __packed req = {
.pm_number = 5,
.pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
- .band_idx = 0,
+ .band_idx = band,
};
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
&req, sizeof(req), true);
}
+int mt7615_mcu_set_dbdc(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *ext_phy = mt7615_ext_phy(dev);
+ struct dbdc_entry {
+ u8 type;
+ u8 index;
+ u8 band;
+ u8 _rsv;
+ };
+ struct {
+ u8 enable;
+ u8 num;
+ u8 _rsv[2];
+ struct dbdc_entry entry[64];
+ } req = {
+ .enable = !!ext_phy,
+ };
+ int i;
+
+ if (!ext_phy)
+ goto out;
+
+#define ADD_DBDC_ENTRY(_type, _idx, _band) \
+ do { \
+ req.entry[req.num].type = _type; \
+ req.entry[req.num].index = _idx; \
+ req.entry[req.num++].band = _band; \
+ } while (0)
+
+ for (i = 0; i < 4; i++) {
+ bool band = !!(ext_phy->omac_mask & BIT(i));
+
+ ADD_DBDC_ENTRY(DBDC_TYPE_BSS, i, band);
+ }
+
+ for (i = 0; i < 14; i++) {
+ bool band = !!(ext_phy->omac_mask & BIT(0x11 + i));
+
+ ADD_DBDC_ENTRY(DBDC_TYPE_MBSS, i, band);
+ }
+
+ ADD_DBDC_ENTRY(DBDC_TYPE_MU, 0, 1);
+
+ for (i = 0; i < 3; i++)
+ ADD_DBDC_ENTRY(DBDC_TYPE_BF, i, 1);
+
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 0, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 1, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 2, 1);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 3, 1);
+
+ ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 0, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 1, 1);
+
+out:
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DBDC_CTRL,
+ &req, sizeof(req), true);
+}
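/* DBDC partitioning: with no ext phy a single "disable" message resets
 * the mapping; otherwise every HW object is pinned to a band. BSS slots
 * follow the ext phy's omac_mask (HW BSSIDs in bits 0-3, extended BSSIDs
 * from bit 0x11), the MU and BF contexts live on band 1, WMM queue pairs
 * 0/1 and 2/3 serve band 0 and band 1 respectively, and each band gets
 * its own management queue. */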
+
int mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool enable)
{
@@ -898,124 +1112,55 @@ int mt7615_mcu_set_bss_info(struct mt7615_dev *dev,
return ret;
}
-static int
-mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev,
- struct mt7615_vif *mvif)
+int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
{
- struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_generic g_wtbl;
- struct wtbl_rx rx_wtbl;
- } req = {
- .hdr = {
- .wlan_idx = mvif->sta.wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- .tlv_num = cpu_to_le16(2),
- },
- .g_wtbl = {
- .tag = cpu_to_le16(WTBL_GENERIC),
- .len = cpu_to_le16(sizeof(struct wtbl_generic)),
- .muar_idx = 0xe,
- },
- .rx_wtbl = {
- .tag = cpu_to_le16(WTBL_RX),
- .len = cpu_to_le16(sizeof(struct wtbl_rx)),
- .rca1 = 1,
- .rca2 = 1,
- .rv = 1,
- },
+ struct wtbl_req_hdr req = {
+ .operation = WTBL_RESET_ALL,
};
- eth_broadcast_addr(req.g_wtbl.peer_addr);
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
&req, sizeof(req), true);
}
-int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool enable)
+static int
+mt7615_mcu_send_sta_rec(struct mt7615_dev *dev, u8 *req, u8 *wreq,
+ u8 wlen, bool enable)
{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ bool is_v1 = (dev->fw_ver == MT7615_FIRMWARE_V1);
+ u32 slen = is_v1 ? wreq - req : wreq - req + wlen;
+ int ret;
- if (!enable) {
- struct wtbl_req_hdr req = {
- .wlan_idx = mvif->sta.wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- };
+ if (is_v1 && !enable) {
+ ret = __mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD_STA_REC_UPDATE,
+ req, slen, true);
+ if (ret)
+ return ret;
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &req, sizeof(req), true);
+ wreq, wlen, true);
}
- return mt7615_mcu_add_wtbl_bmc(dev, mvif);
-}
-
-int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_generic g_wtbl;
- struct wtbl_rx rx_wtbl;
- } req = {
- .hdr = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- .tlv_num = cpu_to_le16(2),
- },
- .g_wtbl = {
- .tag = cpu_to_le16(WTBL_GENERIC),
- .len = cpu_to_le16(sizeof(struct wtbl_generic)),
- .muar_idx = mvif->omac_idx,
- .qos = sta->wme,
- .partial_aid = cpu_to_le16(sta->aid),
- },
- .rx_wtbl = {
- .tag = cpu_to_le16(WTBL_RX),
- .len = cpu_to_le16(sizeof(struct wtbl_rx)),
- .rca1 = vif->type != NL80211_IFTYPE_AP,
- .rca2 = 1,
- .rv = 1,
- },
- };
- memcpy(req.g_wtbl.peer_addr, sta->addr, ETH_ALEN);
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &req, sizeof(req), true);
-}
-
-int mt7615_mcu_del_wtbl(struct mt7615_dev *dev,
- struct ieee80211_sta *sta)
-{
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct wtbl_req_hdr req = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- };
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &req, sizeof(req), true);
-}
-
-int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
-{
- struct wtbl_req_hdr req = {
- .operation = WTBL_RESET_ALL,
- };
+ if (is_v1) {
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+ wreq, wlen, true);
+ if (ret)
+ return ret;
+ }
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &req, sizeof(req), true);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+ req, slen, true);
}
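/* Dispatch rules: V1 firmware wants the WTBL portion as a stand-alone
 * MCU_EXT_CMD_WTBL_UPDATE and the sta_rec portion (without the trailing
 * WTBL container) as a separate STA_REC_UPDATE - WTBL first when adding
 * a peer, sta_rec first when tearing one down. V2 firmware takes the
 * whole buffer, STA_REC_WTBL container included, in one STA_REC_UPDATE.
 *
 * All the callers below build their payloads with the same append
 * pattern; an illustrative stand-alone helper (not part of this patch)
 * makes the idiom explicit:
 */
static u8 *__maybe_unused mcu_append_tlv(u8 *buf, u16 tag, u16 len)
{
	struct {
		__le16 tag;
		__le16 len;
	} *hdr = (void *)buf;

	/* every wtbl_* / sta_rec_* struct in mcu.h starts with tag + len */
	hdr->tag = cpu_to_le16(tag);
	hdr->len = cpu_to_le16(len);

	return buf + len; /* caller fills the body, then appends the next TLV */
}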
-int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool en)
+int mt7615_mcu_set_bmc(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool en)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct {
struct sta_req_hdr hdr;
struct sta_rec_basic basic;
- } req = {
+ u8 buf[MT7615_WTBL_UPDATE_MAX_SIZE];
+ } __packed req = {
.hdr = {
.bss_idx = mvif->idx,
.wlan_idx = mvif->sta.wcid.idx,
@@ -1029,8 +1174,26 @@ int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
.conn_type = cpu_to_le32(CONNECTION_INFRA_BC),
},
};
+ struct sta_rec_wtbl *wtbl = NULL;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct wtbl_generic *wtbl_g;
+ struct wtbl_rx *wtbl_rx;
+ u8 *buf = req.buf;
+
eth_broadcast_addr(req.basic.peer_addr);
+ if (dev->fw_ver > MT7615_FIRMWARE_V1) {
+ req.hdr.tlv_num = cpu_to_le16(2);
+ wtbl = (struct sta_rec_wtbl *)buf;
+ wtbl->tag = cpu_to_le16(STA_REC_WTBL);
+ buf += sizeof(*wtbl);
+ }
+
+ wtbl_hdr = (struct wtbl_req_hdr *)buf;
+ buf += sizeof(*wtbl_hdr);
+ wtbl_hdr->wlan_idx = mvif->sta.wcid.idx;
+ wtbl_hdr->operation = WTBL_RESET_AND_SET;
+
if (en) {
req.basic.conn_state = CONN_STATE_PORT_SECURE;
req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
@@ -1038,14 +1201,36 @@ int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
} else {
req.basic.conn_state = CONN_STATE_DISCONNECT;
req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
+ goto out;
}
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
- &req, sizeof(req), true);
+ wtbl_g = (struct wtbl_generic *)buf;
+ buf += sizeof(*wtbl_g);
+ wtbl_g->tag = cpu_to_le16(WTBL_GENERIC);
+ wtbl_g->len = cpu_to_le16(sizeof(*wtbl_g));
+ wtbl_g->muar_idx = 0xe;
+ eth_broadcast_addr(wtbl_g->peer_addr);
+
+ wtbl_rx = (struct wtbl_rx *)buf;
+ buf += sizeof(*wtbl_rx);
+ wtbl_rx->tag = cpu_to_le16(WTBL_RX);
+ wtbl_rx->len = cpu_to_le16(sizeof(*wtbl_rx));
+ wtbl_rx->rv = 1;
+ wtbl_rx->rca1 = 1;
+ wtbl_rx->rca2 = 1;
+
+ wtbl_hdr->tlv_num = cpu_to_le16(2);
+
+out:
+ if (wtbl)
+ wtbl->len = cpu_to_le16(buf - (u8 *)wtbl_hdr);
+
+ return mt7615_mcu_send_sta_rec(dev, (u8 *)&req, (u8 *)wtbl_hdr,
+ buf - (u8 *)wtbl_hdr, en);
}
-int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool en)
+int mt7615_mcu_set_sta(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool en)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
@@ -1053,11 +1238,11 @@ int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
struct {
struct sta_req_hdr hdr;
struct sta_rec_basic basic;
- } req = {
+ u8 buf[MT7615_WTBL_UPDATE_MAX_SIZE];
+ } __packed req = {
.hdr = {
.bss_idx = mvif->idx,
.wlan_idx = msta->wcid.idx,
- .tlv_num = cpu_to_le16(1),
.is_tlv_append = 1,
.muar_idx = mvif->omac_idx,
},
@@ -1068,6 +1253,13 @@ int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
.aid = cpu_to_le16(sta->aid),
},
};
+ struct sta_rec_wtbl *wtbl = NULL;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct wtbl_generic *wtbl_g;
+ struct wtbl_rx *wtbl_rx;
+ u8 *buf = req.buf;
+ u8 wtlv = 0, stlv = 1;
+
memcpy(req.basic.peer_addr, sta->addr, ETH_ALEN);
switch (vif->type) {
@@ -1090,21 +1282,161 @@ int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
req.basic.conn_state = CONN_STATE_PORT_SECURE;
req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
EXTRA_INFO_NEW);
+
+ /* sta_rec ht */
+ if (sta->ht_cap.ht_supported) {
+ struct sta_rec_ht *sta_ht;
+
+ sta_ht = (struct sta_rec_ht *)buf;
+ buf += sizeof(*sta_ht);
+ sta_ht->tag = cpu_to_le16(STA_REC_HT);
+ sta_ht->len = cpu_to_le16(sizeof(*sta_ht));
+ sta_ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ stlv++;
+
+ /* sta_rec vht */
+ if (sta->vht_cap.vht_supported) {
+ struct sta_rec_vht *sta_vht;
+
+ sta_vht = (struct sta_rec_vht *)buf;
+ buf += sizeof(*sta_vht);
+ sta_vht->tag = cpu_to_le16(STA_REC_VHT);
+ sta_vht->len = cpu_to_le16(sizeof(*sta_vht));
+ sta_vht->vht_cap =
+ cpu_to_le32(sta->vht_cap.cap);
+ sta_vht->vht_rx_mcs_map =
+ sta->vht_cap.vht_mcs.rx_mcs_map;
+ sta_vht->vht_tx_mcs_map =
+ sta->vht_cap.vht_mcs.tx_mcs_map;
+ stlv++;
+ }
+ }
} else {
req.basic.conn_state = CONN_STATE_DISCONNECT;
req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
}
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
- &req, sizeof(req), true);
+ /* wtbl */
+ if (dev->fw_ver > MT7615_FIRMWARE_V1) {
+ wtbl = (struct sta_rec_wtbl *)buf;
+ wtbl->tag = cpu_to_le16(STA_REC_WTBL);
+ buf += sizeof(*wtbl);
+ stlv++;
+ }
+
+ wtbl_hdr = (struct wtbl_req_hdr *)buf;
+ buf += sizeof(*wtbl_hdr);
+ wtbl_hdr->wlan_idx = msta->wcid.idx;
+ wtbl_hdr->operation = WTBL_RESET_AND_SET;
+
+ if (!en)
+ goto out;
+
+ wtbl_g = (struct wtbl_generic *)buf;
+ buf += sizeof(*wtbl_g);
+ wtbl_g->tag = cpu_to_le16(WTBL_GENERIC);
+ wtbl_g->len = cpu_to_le16(sizeof(*wtbl_g));
+ wtbl_g->muar_idx = mvif->omac_idx;
+ wtbl_g->qos = sta->wme;
+ wtbl_g->partial_aid = cpu_to_le16(sta->aid);
+ memcpy(wtbl_g->peer_addr, sta->addr, ETH_ALEN);
+ wtlv++;
+
+ wtbl_rx = (struct wtbl_rx *)buf;
+ buf += sizeof(*wtbl_rx);
+ wtbl_rx->tag = cpu_to_le16(WTBL_RX);
+ wtbl_rx->len = cpu_to_le16(sizeof(*wtbl_rx));
+ wtbl_rx->rv = 1;
+ wtbl_rx->rca1 = vif->type != NL80211_IFTYPE_AP;
+ wtbl_rx->rca2 = 1;
+ wtlv++;
+
+ /* wtbl ht */
+ if (sta->ht_cap.ht_supported) {
+ struct wtbl_ht *wtbl_ht;
+ struct wtbl_raw *wtbl_raw;
+ u32 val = 0, msk;
+
+ wtbl_ht = (struct wtbl_ht *)buf;
+ buf += sizeof(*wtbl_ht);
+ wtbl_ht->tag = cpu_to_le16(WTBL_HT);
+ wtbl_ht->len = cpu_to_le16(sizeof(*wtbl_ht));
+ wtbl_ht->ht = 1;
+ wtbl_ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+ wtbl_ht->af = sta->ht_cap.ampdu_factor;
+ wtbl_ht->mm = sta->ht_cap.ampdu_density;
+ wtlv++;
+
+ /* wtbl vht */
+ if (sta->vht_cap.vht_supported) {
+ struct wtbl_vht *wtbl_vht;
+
+ wtbl_vht = (struct wtbl_vht *)buf;
+ buf += sizeof(*wtbl_vht);
+ wtbl_vht->tag = cpu_to_le16(WTBL_VHT);
+ wtbl_vht->len = cpu_to_le16(sizeof(*wtbl_vht));
+ wtbl_vht->vht = 1;
+ wtbl_vht->ldpc = sta->vht_cap.cap &
+ IEEE80211_VHT_CAP_RXLDPC;
+ wtlv++;
+
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ val |= MT_WTBL_W5_SHORT_GI_80;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ val |= MT_WTBL_W5_SHORT_GI_160;
+ }
+
+ /* wtbl smps */
+ if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) {
+ struct wtbl_smps *wtbl_smps;
+
+ wtbl_smps = (struct wtbl_smps *)buf;
+ buf += sizeof(*wtbl_smps);
+ wtbl_smps->tag = cpu_to_le16(WTBL_SMPS);
+ wtbl_smps->len = cpu_to_le16(sizeof(*wtbl_smps));
+ wtbl_smps->smps = 1;
+ wtlv++;
+ }
+
+ /* sgi */
+ msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
+ MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
+
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ val |= MT_WTBL_W5_SHORT_GI_20;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ val |= MT_WTBL_W5_SHORT_GI_40;
+
+ wtbl_raw = (struct wtbl_raw *)buf;
+ buf += sizeof(*wtbl_raw);
+ wtbl_raw->tag = cpu_to_le16(WTBL_RAW_DATA);
+ wtbl_raw->len = cpu_to_le16(sizeof(*wtbl_raw));
+ wtbl_raw->wtbl_idx = 1;
+ wtbl_raw->dw = 5;
+ wtbl_raw->msk = cpu_to_le32(~msk);
+ wtbl_raw->val = cpu_to_le32(val);
+ wtlv++;
+ }
+
+out:
+ if (wtbl)
+ wtbl->len = cpu_to_le16(buf - (u8 *)wtbl_hdr);
+
+ wtbl_hdr->tlv_num = cpu_to_le16(wtlv);
+ req.hdr.tlv_num = cpu_to_le16(stlv);
+
+ return mt7615_mcu_send_sta_rec(dev, (u8 *)&req, (u8 *)wtbl_hdr,
+ buf - (u8 *)wtbl_hdr, en);
}
-int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+int mt7615_mcu_set_bcn(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int en)
{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
struct ieee80211_mutable_offsets offs;
+ struct ieee80211_tx_info *info;
struct req {
u8 omac_idx;
u8 enable;
@@ -1128,7 +1460,7 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
};
struct sk_buff *skb;
- skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
+ skb = ieee80211_beacon_get_template(hw, vif, &offs);
if (!skb)
return -EINVAL;
@@ -1138,6 +1470,11 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
return -EINVAL;
}
+ if (mvif->band_idx) {
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+ }
+
mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
0, NULL);
memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
@@ -1156,72 +1493,6 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
&req, sizeof(req), true);
}
-int mt7615_mcu_set_tx_power(struct mt7615_dev *dev)
-{
- int i, ret, n_chains = hweight8(dev->mt76.antenna_mask);
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
- int freq = chandef->center_freq1, len, target_chains;
- u8 *req, *data, *eep = (u8 *)dev->mt76.eeprom.data;
- enum nl80211_band band = chandef->chan->band;
- struct ieee80211_hw *hw = mt76_hw(dev);
- struct {
- u8 center_chan;
- u8 dbdc_idx;
- u8 band;
- u8 rsv;
- } __packed req_hdr = {
- .center_chan = ieee80211_frequency_to_channel(freq),
- .band = band,
- };
- s8 tx_power;
-
- len = sizeof(req_hdr) + __MT_EE_MAX - MT_EE_NIC_CONF_0;
- req = kzalloc(len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- memcpy(req, &req_hdr, sizeof(req_hdr));
- data = req + sizeof(req_hdr);
- memcpy(data, eep + MT_EE_NIC_CONF_0,
- __MT_EE_MAX - MT_EE_NIC_CONF_0);
-
- tx_power = hw->conf.power_level * 2;
- switch (n_chains) {
- case 4:
- tx_power -= 12;
- break;
- case 3:
- tx_power -= 8;
- break;
- case 2:
- tx_power -= 6;
- break;
- default:
- break;
- }
- tx_power = max_t(s8, tx_power, 0);
- dev->mt76.txpower_cur = tx_power;
-
- target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
- for (i = 0; i < target_chains; i++) {
- int index = -MT_EE_NIC_CONF_0;
-
- ret = mt7615_eeprom_get_power_index(dev, chandef->chan, i);
- if (ret < 0)
- goto out;
-
- index += ret;
- data[index] = min_t(u8, data[index], tx_power);
- }
-
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
- req, len, true);
-out:
- kfree(req);
-
- return ret;
-}
-
int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
enum mt7615_rdd_cmd cmd, u8 index,
u8 rx_sel, u8 val)
@@ -1243,6 +1514,54 @@ int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
&req, sizeof(req), true);
}
+int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
+{
+ struct {
+ u16 tag;
+ u16 min_lpn;
+ } req = {
+ .tag = 0x1,
+ .min_lpn = val,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
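/* Note: tag and min_lpn go out in host endianness here; strictly these
 * are device-facing fields and would want __le16 plus cpu_to_le16() like
 * the efuse buffer header above - harmless on the little-endian hosts
 * this hardware ships with. The same applies to the two threshold
 * commands below. */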
+
+int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
+ const struct mt7615_dfs_pulse *pulse)
+{
+ struct {
+ u16 tag;
+ struct mt7615_dfs_pulse pulse;
+ } req = {
+ .tag = 0x3,
+ };
+
+ memcpy(&req.pulse, pulse, sizeof(*pulse));
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
+
+int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
+ const struct mt7615_dfs_pattern *pattern)
+{
+ struct {
+ u16 tag;
+ u16 radar_type;
+ struct mt7615_dfs_pattern pattern;
+ } req = {
+ .tag = 0x2,
+ .radar_type = index,
+ };
+
+ memcpy(&req.pattern, pattern, sizeof(*pattern));
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
+}
+
int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
{
struct {
@@ -1274,9 +1593,35 @@ int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
&req, sizeof(req), false);
}
-int mt7615_mcu_set_channel(struct mt7615_dev *dev)
+static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ int n_chains = hweight8(mphy->antenna_mask);
+ int tx_power;
+ int i;
+
+ tx_power = hw->conf.power_level * 2 -
+ mt76_tx_power_nss_delta(n_chains);
+ mphy->txpower_cur = tx_power;
+
+ for (i = 0; i < MT_SKU_1SS_DELTA; i++)
+ sku[i] = tx_power;
+
+ for (i = 0; i < 4; i++) {
+ int delta = 0;
+
+ if (i < n_chains - 1)
+ delta = mt76_tx_power_nss_delta(n_chains) -
+ mt76_tx_power_nss_delta(i + 1);
+ sku[MT_SKU_1SS_DELTA + i] = delta;
+ }
+}
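/* The rate table gets one base power for all 49 rate entries (the hw
 * power level in 0.5 dB steps minus the array-gain delta for the active
 * chain count, via mt76_tx_power_nss_delta()), and the trailing
 * MT_SKU_*SS_DELTA slots credit power back for transmissions that use
 * fewer spatial streams than chains. */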
+
+int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
struct {
u8 control_chan;
@@ -1299,11 +1644,10 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
} req = {
.control_chan = chandef->chan->hw_value,
.center_chan = ieee80211_frequency_to_channel(freq1),
- .tx_streams = (dev->mt76.chainmask >> 8) & 0xf,
- .rx_streams_mask = dev->mt76.antenna_mask,
+ .tx_streams = hweight8(phy->mt76->antenna_mask),
+ .rx_streams_mask = phy->chainmask,
.center_chan2 = ieee80211_frequency_to_channel(freq2),
};
- int ret;
if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
@@ -1313,7 +1657,9 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
else
req.switch_reason = CH_SWITCH_NORMAL;
- switch (dev->mt76.chandef.width) {
+ req.band_idx = phy != &dev->phy;
+
+ switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
req.bw = CMD_CBW_40MHZ;
break;
@@ -1338,139 +1684,10 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
req.bw = CMD_CBW_20MHZ;
break;
}
- memset(req.txpower_sku, 0x3f, 49);
-
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
- &req, sizeof(req), true);
- if (ret)
- return ret;
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RX_PATH,
- &req, sizeof(req), true);
-}
-
-int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sta_req_hdr *sta_hdr;
- struct wtbl_raw *wtbl_raw;
- struct sta_rec_ht *sta_ht;
- struct wtbl_ht *wtbl_ht;
- int buf_len, ret, ntlv = 2;
- u32 msk, val = 0;
- u8 *buf;
-
- buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- wtbl_hdr = (struct wtbl_req_hdr *)buf;
- wtbl_hdr->wlan_idx = msta->wcid.idx;
- wtbl_hdr->operation = WTBL_SET;
- buf_len = sizeof(*wtbl_hdr);
-
- /* ht basic */
- wtbl_ht = (struct wtbl_ht *)(buf + buf_len);
- wtbl_ht->tag = cpu_to_le16(WTBL_HT);
- wtbl_ht->len = cpu_to_le16(sizeof(*wtbl_ht));
- wtbl_ht->ht = 1;
- wtbl_ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
- wtbl_ht->af = sta->ht_cap.ampdu_factor;
- wtbl_ht->mm = sta->ht_cap.ampdu_density;
- buf_len += sizeof(*wtbl_ht);
-
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
- val |= MT_WTBL_W5_SHORT_GI_20;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
- val |= MT_WTBL_W5_SHORT_GI_40;
-
- /* vht basic */
- if (sta->vht_cap.vht_supported) {
- struct wtbl_vht *wtbl_vht;
-
- wtbl_vht = (struct wtbl_vht *)(buf + buf_len);
- buf_len += sizeof(*wtbl_vht);
- wtbl_vht->tag = cpu_to_le16(WTBL_VHT);
- wtbl_vht->len = cpu_to_le16(sizeof(*wtbl_vht));
- wtbl_vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC;
- wtbl_vht->vht = 1;
- ntlv++;
-
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
- val |= MT_WTBL_W5_SHORT_GI_80;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
- val |= MT_WTBL_W5_SHORT_GI_160;
- }
-
- /* smps */
- if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) {
- struct wtbl_smps *wtbl_smps;
-
- wtbl_smps = (struct wtbl_smps *)(buf + buf_len);
- buf_len += sizeof(*wtbl_smps);
- wtbl_smps->tag = cpu_to_le16(WTBL_SMPS);
- wtbl_smps->len = cpu_to_le16(sizeof(*wtbl_smps));
- wtbl_smps->smps = 1;
- ntlv++;
- }
-
- /* sgi */
- msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
- MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
-
- wtbl_raw = (struct wtbl_raw *)(buf + buf_len);
- buf_len += sizeof(*wtbl_raw);
- wtbl_raw->tag = cpu_to_le16(WTBL_RAW_DATA);
- wtbl_raw->len = cpu_to_le16(sizeof(*wtbl_raw));
- wtbl_raw->wtbl_idx = 1;
- wtbl_raw->dw = 5;
- wtbl_raw->msk = cpu_to_le32(~msk);
- wtbl_raw->val = cpu_to_le32(val);
-
- wtbl_hdr->tlv_num = cpu_to_le16(ntlv);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- buf, buf_len, true);
- if (ret)
- goto out;
-
- memset(buf, 0, MT7615_WTBL_UPDATE_MAX_SIZE);
-
- sta_hdr = (struct sta_req_hdr *)buf;
- sta_hdr->bss_idx = mvif->idx;
- sta_hdr->wlan_idx = msta->wcid.idx;
- sta_hdr->is_tlv_append = 1;
- ntlv = sta->vht_cap.vht_supported ? 2 : 1;
- sta_hdr->tlv_num = cpu_to_le16(ntlv);
- sta_hdr->muar_idx = mvif->omac_idx;
- buf_len = sizeof(*sta_hdr);
-
- sta_ht = (struct sta_rec_ht *)(buf + buf_len);
- sta_ht->tag = cpu_to_le16(STA_REC_HT);
- sta_ht->len = cpu_to_le16(sizeof(*sta_ht));
- sta_ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
- buf_len += sizeof(*sta_ht);
-
- if (sta->vht_cap.vht_supported) {
- struct sta_rec_vht *sta_vht;
-
- sta_vht = (struct sta_rec_vht *)(buf + buf_len);
- buf_len += sizeof(*sta_vht);
- sta_vht->tag = cpu_to_le16(STA_REC_VHT);
- sta_vht->len = cpu_to_le16(sizeof(*sta_vht));
- sta_vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
- sta_vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
- sta_vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
- }
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
- buf, buf_len, true);
-out:
- kfree(buf);
+ mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);

- return ret;
+ return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}
int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
@@ -1480,27 +1697,10 @@ int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
struct mt7615_vif *mvif = msta->vif;
struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_ba ba;
- } wtbl_req = {
- .hdr = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_SET,
- .tlv_num = cpu_to_le16(1),
- },
- .ba = {
- .tag = cpu_to_le16(WTBL_BA),
- .len = cpu_to_le16(sizeof(struct wtbl_ba)),
- .tid = params->tid,
- .ba_type = MT_BA_TYPE_ORIGINATOR,
- .sn = add ? cpu_to_le16(params->ssn) : 0,
- .ba_en = add,
- },
- };
- struct {
struct sta_req_hdr hdr;
struct sta_rec_ba ba;
- } sta_req = {
+ u8 buf[MT7615_WTBL_UPDATE_MAX_SIZE];
+ } __packed req = {
.hdr = {
.bss_idx = mvif->idx,
.wlan_idx = msta->wcid.idx,
@@ -1519,7 +1719,32 @@ int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
.winsize = cpu_to_le16(params->buf_size),
},
};
- int ret;
+ struct sta_rec_wtbl *wtbl = NULL;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct wtbl_ba *wtbl_ba;
+ u8 *buf = req.buf;
+
+ if (dev->fw_ver > MT7615_FIRMWARE_V1) {
+ req.hdr.tlv_num = cpu_to_le16(2);
+ wtbl = (struct sta_rec_wtbl *)buf;
+ wtbl->tag = cpu_to_le16(STA_REC_WTBL);
+ buf += sizeof(*wtbl);
+ }
+
+ wtbl_hdr = (struct wtbl_req_hdr *)buf;
+ buf += sizeof(*wtbl_hdr);
+ wtbl_hdr->wlan_idx = msta->wcid.idx;
+ wtbl_hdr->operation = WTBL_SET;
+ wtbl_hdr->tlv_num = cpu_to_le16(1);
+
+ wtbl_ba = (struct wtbl_ba *)buf;
+ buf += sizeof(*wtbl_ba);
+ wtbl_ba->tag = cpu_to_le16(WTBL_BA);
+ wtbl_ba->len = cpu_to_le16(sizeof(*wtbl_ba));
+ wtbl_ba->tid = params->tid;
+ wtbl_ba->ba_type = MT_BA_TYPE_ORIGINATOR;
+ wtbl_ba->sn = add ? cpu_to_le16(params->ssn) : 0;
+ wtbl_ba->ba_en = add;
if (add) {
u8 idx, ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
@@ -1529,16 +1754,14 @@ int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
break;
}
- wtbl_req.ba.ba_winsize_idx = idx;
+ wtbl_ba->ba_winsize_idx = idx;
}
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &wtbl_req, sizeof(wtbl_req), true);
- if (ret)
- return ret;
+ if (wtbl)
+ wtbl->len = cpu_to_le16(buf - (u8 *)wtbl_hdr);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
- &sta_req, sizeof(sta_req), true);
+ return mt7615_mcu_send_sta_rec(dev, (u8 *)&req, (u8 *)wtbl_hdr,
+ buf - (u8 *)wtbl_hdr, true);
}
int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
@@ -1548,28 +1771,10 @@ int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
struct mt7615_vif *mvif = msta->vif;
struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_ba ba;
- } wtbl_req = {
- .hdr = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_SET,
- .tlv_num = cpu_to_le16(1),
- },
- .ba = {
- .tag = cpu_to_le16(WTBL_BA),
- .len = cpu_to_le16(sizeof(struct wtbl_ba)),
- .tid = params->tid,
- .ba_type = MT_BA_TYPE_RECIPIENT,
- .rst_ba_tid = params->tid,
- .rst_ba_sel = RST_BA_MAC_TID_MATCH,
- .rst_ba_sb = 1,
- },
- };
- struct {
struct sta_req_hdr hdr;
struct sta_rec_ba ba;
- } sta_req = {
+ u8 buf[MT7615_WTBL_UPDATE_MAX_SIZE];
+ } __packed req = {
.hdr = {
.bss_idx = mvif->idx,
.wlan_idx = msta->wcid.idx,
@@ -1588,17 +1793,41 @@ int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
.winsize = cpu_to_le16(params->buf_size),
},
};
- int ret;
+ struct sta_rec_wtbl *wtbl = NULL;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct wtbl_ba *wtbl_ba;
+ u8 *buf = req.buf;
+
+ if (dev->fw_ver > MT7615_FIRMWARE_V1) {
+ req.hdr.tlv_num = cpu_to_le16(2);
+ wtbl = (struct sta_rec_wtbl *)buf;
+ wtbl->tag = cpu_to_le16(STA_REC_WTBL);
+ buf += sizeof(*wtbl);
+ }
- memcpy(wtbl_req.ba.peer_addr, params->sta->addr, ETH_ALEN);
+ wtbl_hdr = (struct wtbl_req_hdr *)buf;
+ buf += sizeof(*wtbl_hdr);
+ wtbl_hdr->wlan_idx = msta->wcid.idx;
+ wtbl_hdr->operation = WTBL_SET;
+ wtbl_hdr->tlv_num = cpu_to_le16(1);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
- &sta_req, sizeof(sta_req), true);
- if (ret || !add)
- return ret;
+ wtbl_ba = (struct wtbl_ba *)buf;
+ buf += sizeof(*wtbl_ba);
+ wtbl_ba->tag = cpu_to_le16(WTBL_BA);
+ wtbl_ba->len = cpu_to_le16(sizeof(*wtbl_ba));
+ wtbl_ba->tid = params->tid;
+ wtbl_ba->ba_type = MT_BA_TYPE_RECIPIENT;
+ wtbl_ba->rst_ba_tid = params->tid;
+ wtbl_ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
+ wtbl_ba->rst_ba_sb = 1;
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &wtbl_req, sizeof(wtbl_req), true);
+ memcpy(wtbl_ba->peer_addr, params->sta->addr, ETH_ALEN);
+
+ if (wtbl)
+ wtbl->len = cpu_to_le16(buf - (u8 *)wtbl_hdr);
+
+ return mt7615_mcu_send_sta_rec(dev, (u8 *)&req, (u8 *)wtbl_hdr,
+ buf - (u8 *)wtbl_hdr, add);
}
int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
@@ -1613,3 +1842,21 @@ int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
sizeof(req), true);
}
+
+int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct {
+ u8 format_id;
+ u8 sku_enable;
+ u8 band_idx;
+ u8 rsv;
+ } req = {
+ .format_id = 0,
+ .band_idx = phy != &dev->phy,
+ .sku_enable = enable,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req,
+ sizeof(req), true);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
index 1fd7dffa6eef..db0199e60cb8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -45,6 +45,62 @@ enum {
MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
};
+enum {
+ MT_SKU_CCK_1_2 = 0,
+ MT_SKU_CCK_55_11,
+ MT_SKU_OFDM_6_9,
+ MT_SKU_OFDM_12_18,
+ MT_SKU_OFDM_24_36,
+ MT_SKU_OFDM_48,
+ MT_SKU_OFDM_54,
+ MT_SKU_HT20_0_8,
+ MT_SKU_HT20_32,
+ MT_SKU_HT20_1_2_9_10,
+ MT_SKU_HT20_3_4_11_12,
+ MT_SKU_HT20_5_13,
+ MT_SKU_HT20_6_14,
+ MT_SKU_HT20_7_15,
+ MT_SKU_HT40_0_8,
+ MT_SKU_HT40_32,
+ MT_SKU_HT40_1_2_9_10,
+ MT_SKU_HT40_3_4_11_12,
+ MT_SKU_HT40_5_13,
+ MT_SKU_HT40_6_14,
+ MT_SKU_HT40_7_15,
+ MT_SKU_VHT20_0,
+ MT_SKU_VHT20_1_2,
+ MT_SKU_VHT20_3_4,
+ MT_SKU_VHT20_5_6,
+ MT_SKU_VHT20_7,
+ MT_SKU_VHT20_8,
+ MT_SKU_VHT20_9,
+ MT_SKU_VHT40_0,
+ MT_SKU_VHT40_1_2,
+ MT_SKU_VHT40_3_4,
+ MT_SKU_VHT40_5_6,
+ MT_SKU_VHT40_7,
+ MT_SKU_VHT40_8,
+ MT_SKU_VHT40_9,
+ MT_SKU_VHT80_0,
+ MT_SKU_VHT80_1_2,
+ MT_SKU_VHT80_3_4,
+ MT_SKU_VHT80_5_6,
+ MT_SKU_VHT80_7,
+ MT_SKU_VHT80_8,
+ MT_SKU_VHT80_9,
+ MT_SKU_VHT160_0,
+ MT_SKU_VHT160_1_2,
+ MT_SKU_VHT160_3_4,
+ MT_SKU_VHT160_5_6,
+ MT_SKU_VHT160_7,
+ MT_SKU_VHT160_8,
+ MT_SKU_VHT160_9,
+ MT_SKU_1SS_DELTA,
+ MT_SKU_2SS_DELTA,
+ MT_SKU_3SS_DELTA,
+ MT_SKU_4SS_DELTA,
+};
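/* 49 rate-group entries (CCK, OFDM, HT20/40, VHT20-160) followed by four
 * per-stream delta slots; MT_SKU_1SS_DELTA therefore equals the
 * rate-entry count, which is why the old code memset 49 bytes into
 * txpower_sku and mt7615_mcu_set_txpower_sku() loops up to it. */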
+
struct mt7615_mcu_rxd {
__le32 rxd[4];
@@ -60,6 +116,52 @@ struct mt7615_mcu_rxd {
u8 s2d_index;
};
+struct mt7615_mcu_rdd_report {
+ struct mt7615_mcu_rxd rxd;
+
+ u8 idx;
+ u8 long_detected;
+ u8 constant_prf_detected;
+ u8 staggered_prf_detected;
+ u8 radar_type_idx;
+ u8 periodic_pulse_num;
+ u8 long_pulse_num;
+ u8 hw_pulse_num;
+
+ u8 out_lpn;
+ u8 out_spn;
+ u8 out_crpn;
+ u8 out_crpw;
+ u8 out_crbn;
+ u8 out_stgpn;
+ u8 out_stgpw;
+
+ u8 _rsv[2];
+
+ __le32 out_pri_const;
+ __le32 out_pri_stg[3];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ } long_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ } periodic_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 sc_pass;
+ u8 sw_reset;
+ } hw_pulse[32];
+};
+
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
#define MCU_PKT_ID 0xa0
@@ -93,6 +195,7 @@ enum {
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
+ MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
@@ -102,9 +205,12 @@ enum {
MCU_EXT_CMD_WTBL_UPDATE = 0x32,
MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
+ MCU_EXT_CMD_DBDC_CTRL = 0x45,
MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
MCU_EXT_CMD_SET_RX_PATH = 0x4e,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
+ MCU_EXT_CMD_SET_RDD_TH = 0x7c,
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
};
@@ -156,6 +262,18 @@ enum {
DEV_INFO_MAX_NUM
};
+enum {
+ DBDC_TYPE_WMM,
+ DBDC_TYPE_MGMT,
+ DBDC_TYPE_BSS,
+ DBDC_TYPE_MBSS,
+ DBDC_TYPE_REPEATER,
+ DBDC_TYPE_MU,
+ DBDC_TYPE_BF,
+ DBDC_TYPE_PTA,
+ __DBDC_TYPE_MAX,
+};
+
struct bss_info_omac {
__le16 tag;
__le16 len;
@@ -447,9 +565,10 @@ struct sta_rec_ba {
__le16 winsize;
} __packed;
-#define MT7615_STA_REC_UPDATE_MAX_SIZE (sizeof(struct sta_rec_basic) + \
- sizeof(struct sta_rec_ht) + \
- sizeof(struct sta_rec_vht))
+struct sta_rec_wtbl {
+ __le16 tag;
+ __le16 len;
+} __packed;
enum {
STA_REC_BASIC,
@@ -464,6 +583,7 @@ enum {
STA_REC_HT,
STA_REC_VHT,
STA_REC_APPS,
+ STA_REC_WTBL = 13,
STA_REC_MAX_NUM
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
new file mode 100644
index 000000000000..0b445471b6e8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -0,0 +1,115 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "../trace.h"
+
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
+{
+ u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+ u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
+
+ return MT_PCIE_REMAP_BASE_2 + offset;
+}
+
+static void
+mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7615_dev *dev = dev_instance;
+ u32 intr;
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ intr &= dev->mt76.mmio.irqmask;
+
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ napi_schedule(&dev->mt76.tx_napi);
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
+ mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
+ mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ if (val & MT_MCU_CMD_ERROR_MASK) {
+ dev->reset_state = val;
+ ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
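/* Classic mt76 interrupt flow: ack everything by writing the source
 * register back, mask the rings that fired, and let the NAPI handlers
 * re-enable them (mt7615_rx_poll_complete() for RX, the tx_napi poll for
 * TX). New here: an MT_INT_MCU_CMD interrupt samples MT_MCU_CMD and
 * kicks reset_work when one of the error bits is latched. */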
+
+int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, int irq)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common),
+ .drv_flags = MT_DRV_TXWI_NO_FREE,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .tx_prepare_skb = mt7615_tx_prepare_skb,
+ .tx_complete_skb = mt7615_tx_complete_skb,
+ .rx_skb = mt7615_queue_rx_skb,
+ .rx_poll_complete = mt7615_rx_poll_complete,
+ .sta_ps = mt7615_sta_ps,
+ .sta_add = mt7615_mac_sta_add,
+ .sta_remove = mt7615_mac_sta_remove,
+ .update_survey = mt7615_update_channel,
+ };
+ struct mt7615_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt7615_ops, &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+ mt76_mmio_init(&dev->mt76, mem_base);
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt7615_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 21486831172c..a84a9b4cbf4e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -6,6 +6,7 @@
#include <linux/interrupt.h>
#include <linux/ktime.h>
+#include <linux/regmap.h>
#include "../mt76.h"
#include "regs.h"
@@ -17,9 +18,11 @@
MT7615_MAX_INTERFACES)
#define MT7615_WATCHDOG_TIME (HZ / 10)
+#define MT7615_RESET_TIMEOUT (30 * HZ)
#define MT7615_RATE_RETRY 2
#define MT7615_TX_RING_SIZE 1024
+#define MT7615_TX_MGMT_RING_SIZE 128
#define MT7615_TX_MCU_RING_SIZE 128
#define MT7615_TX_FWDL_RING_SIZE 128
@@ -30,14 +33,30 @@
#define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin"
#define MT7615_ROM_PATCH "mediatek/mt7615_rom_patch.bin"
+#define MT7622_FIRMWARE_N9 "mediatek/mt7622_n9.bin"
+#define MT7622_ROM_PATCH "mediatek/mt7622_rom_patch.bin"
+
+#define MT7615_FIRMWARE_V1 1
+#define MT7615_FIRMWARE_V2 2
+
#define MT7615_EEPROM_SIZE 1024
#define MT7615_TOKEN_SIZE 4096
#define MT_FRAC_SCALE 12
#define MT_FRAC(val, div) (((val) << MT_FRAC_SCALE) / (div))
+#define MT_CHFREQ_VALID BIT(7)
+#define MT_CHFREQ_DBDC_IDX BIT(6)
+#define MT_CHFREQ_SEQ GENMASK(5, 0)
+
+#define MT7615_BAR_RATE_DEFAULT 0x4b /* OFDM 6M */
+#define MT7615_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
+#define MT7615_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+
struct mt7615_vif;
struct mt7615_sta;
+struct mt7615_dfs_pulse;
+struct mt7615_dfs_pattern;
enum mt7615_hw_txq_id {
MT7615_TXQ_MAIN,
@@ -46,6 +65,16 @@ enum mt7615_hw_txq_id {
MT7615_TXQ_FWDL,
};
+enum mt7622_hw_txq_id {
+ MT7622_TXQ_AC0,
+ MT7622_TXQ_AC1,
+ MT7622_TXQ_AC2,
+ MT7622_TXQ_FWDL = MT7615_TXQ_FWDL,
+ MT7622_TXQ_AC3,
+ MT7622_TXQ_MGMT,
+ MT7622_TXQ_MCU = 15,
+};
+
struct mt7615_rate_set {
struct ieee80211_tx_rate probe_rate;
struct ieee80211_tx_rate rates[4];
@@ -79,12 +108,61 @@ struct mt7615_vif {
struct mt7615_sta sta;
};
+struct mib_stats {
+ u32 ack_fail_cnt;
+ u32 fcs_err_cnt;
+ u32 rts_cnt;
+ u32 rts_retries_cnt;
+};
+
+struct mt7615_phy {
+ struct mt76_phy *mt76;
+ struct mt7615_dev *dev;
+
+ u32 rxfilter;
+ u32 omac_mask;
+
+ u16 noise;
+
+ unsigned long last_cca_adj;
+ int false_cca_ofdm, false_cca_cck;
+ s8 ofdm_sensitivity;
+ s8 cck_sensitivity;
+
+ u16 chainmask;
+
+ s16 coverage_class;
+ u8 slottime;
+
+ u8 chfreq;
+ u8 rdd_state;
+ int dfs_state;
+
+ __le32 rx_ampdu_ts;
+ u32 ampdu_ref;
+
+ struct mib_stats mib;
+};
+
struct mt7615_dev {
- struct mt76_dev mt76; /* must be first */
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
+
+ struct mt7615_phy phy;
u32 vif_mask;
u32 omac_mask;
- __le32 rx_ampdu_ts;
+ u16 chainmask;
+
+ struct regmap *infracfg;
+
+ struct work_struct mcu_work;
+
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+ u32 reset_state;
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
@@ -96,17 +174,15 @@ struct mt7615_dev {
s16 power;
} radar_pattern;
u32 hw_pattern;
- int dfs_state;
- int false_cca_ofdm, false_cca_cck;
- unsigned long last_cca_adj;
u8 mac_work_count;
- s8 ofdm_sensitivity;
- s8 cck_sensitivity;
bool scs_en;
+ bool fw_debug;
spinlock_t token_lock;
struct idr token;
+
+ u8 fw_ver;
};
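/* The union aliases the embedded mt76_dev with a mt76_phy, which only
 * works because struct mt76_dev places its primary mt76_phy first; code
 * can then write dev->mphy.state instead of dev->mt76.phy.state when it
 * means the band-0 phy. */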
enum {
@@ -135,11 +211,6 @@ enum {
};
enum {
- MT_HW_RDD0,
- MT_HW_RDD1,
-};
-
-enum {
MT_RX_SEL0,
MT_RX_SEL1,
};
@@ -158,13 +229,53 @@ enum mt7615_rdd_cmd {
RDD_RESUME_BF,
};
+static inline struct mt7615_phy *
+mt7615_hw_phy(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return phy->priv;
+}
+
+static inline struct mt7615_dev *
+mt7615_hw_dev(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return container_of(phy->dev, struct mt7615_dev, mt76);
+}
+
+static inline struct mt7615_phy *
+mt7615_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt76_phy *phy = dev->mt76.phy2;
+
+ if (!phy)
+ return NULL;
+
+ return phy->priv;
+}
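/* Navigation helpers for the dual-phy layout: ieee80211_hw::priv holds
 * the mt76_phy, whose ->priv points back at the mt7615_phy and whose
 * ->dev at the shared mt76_dev; mt7615_ext_phy() returns NULL until a
 * second phy has been registered. */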
+
extern const struct ieee80211_ops mt7615_ops;
extern struct pci_driver mt7615_pci_driver;
+extern struct platform_driver mt7622_wmac_driver;
+#ifdef CONFIG_MT7622_WMAC
+int mt7622_wmac_init(struct mt7615_dev *dev);
+#else
+static inline int mt7622_wmac_init(struct mt7615_dev *dev)
+{
+ return 0;
+}
+#endif
+
+int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, int irq);
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
int mt7615_register_device(struct mt7615_dev *dev);
void mt7615_unregister_device(struct mt7615_dev *dev);
+int mt7615_register_ext_phy(struct mt7615_dev *dev);
+void mt7615_unregister_ext_phy(struct mt7615_dev *dev);
int mt7615_eeprom_init(struct mt7615_dev *dev);
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
struct ieee80211_channel *chan,
@@ -172,26 +283,22 @@ int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
int mt7615_dma_init(struct mt7615_dev *dev);
void mt7615_dma_cleanup(struct mt7615_dev *dev);
int mt7615_mcu_init(struct mt7615_dev *dev);
+bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev);
int mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool enable);
int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
int en);
-void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates);
-int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- bool enable);
-int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_sta *sta);
int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
-int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool en);
-int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool en);
-int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+int mt7615_mcu_set_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool en);
+int mt7615_mcu_set_sta(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool en);
+int mt7615_mcu_set_bcn(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int en);
-int mt7615_mcu_set_channel(struct mt7615_dev *dev);
+int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
@@ -200,30 +307,24 @@ int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
-int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb);
int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
enum mt7615_rdd_cmd cmd, u8 index,
u8 rx_sel, u8 val);
-int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev);
-int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev);
int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev);
+int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl);
static inline bool is_mt7622(struct mt76_dev *dev)
{
+ if (!IS_ENABLED(CONFIG_MT7622_WMAC))
+ return false;
+
return mt76_chip(dev) == 0x7622;
}
-static inline void mt7615_dfs_check_channel(struct mt7615_dev *dev)
+static inline bool is_mt7615(struct mt76_dev *dev)
{
- enum nl80211_chan_width width = dev->mt76.chandef.width;
- u32 freq = dev->mt76.chandef.chan->center_freq;
- struct ieee80211_hw *hw = mt76_hw(dev);
-
- if (hw->conf.chandef.chan->center_freq != freq ||
- hw->conf.chandef.width != width)
- dev->dfs_state = -1;
+ return mt76_chip(dev) == 0x7615;
}
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
@@ -239,26 +340,29 @@ static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
void mt7615_update_channel(struct mt76_dev *mdev);
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
void mt7615_mac_reset_counters(struct mt7615_dev *dev);
-void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev);
+void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable);
+void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy);
void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
struct ieee80211_key_conf *key);
+void mt7615_mac_set_timing(struct mt7615_phy *phy);
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb);
void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data);
void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb);
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
+void mt7615_mac_reset_work(struct work_struct *work);
+int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
-int mt7615_mcu_init_mac(struct mt7615_dev *dev);
-int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val);
-int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter);
+int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable);
+int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val);
+int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int enter);
int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
-int mt7615_mcu_set_tx_power(struct mt7615_dev *dev);
void mt7615_mcu_exit(struct mt7615_dev *dev);
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
@@ -272,17 +376,20 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
-int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void mt7615_mac_work(struct work_struct *work);
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
struct mt76_txwi_cache *txwi);
-int mt76_dfs_start_rdd(struct mt7615_dev *dev, bool force);
-int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev);
+int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val);
+int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
+ const struct mt7615_dfs_pulse *pulse);
+int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
+ const struct mt7615_dfs_pattern *pattern);
+int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable);
+int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy);
int mt7615_init_debugfs(struct mt7615_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h
new file mode 100644
index 000000000000..d3eb49d83b98
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#if !defined(__MT7615_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT7615_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt7615.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt7615
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define TOKEN_ENTRY __field(u16, token)
+#define TOKEN_ASSIGN __entry->token = token
+#define TOKEN_PR_FMT " %d"
+#define TOKEN_PR_ARG __entry->token
+
+DECLARE_EVENT_CLASS(dev_token,
+ TP_PROTO(struct mt7615_dev *dev, u16 token),
+ TP_ARGS(dev, token),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TOKEN_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ TOKEN_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT TOKEN_PR_FMT,
+ DEV_PR_ARG, TOKEN_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_token, mac_tx_free,
+ TP_PROTO(struct mt7615_dev *dev, u16 token),
+ TP_ARGS(dev, token)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt7615_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index 1eb1eb659c3f..43e02128cc48 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -10,84 +10,15 @@
#include <linux/pci.h>
#include "mt7615.h"
-#include "mac.h"
static const struct pci_device_id mt7615_pci_device_table[] = {
{ PCI_DEVICE(0x14c3, 0x7615) },
{ },
};
-u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
-{
- u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
- u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
-
- mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
-
- return MT_PCIE_REMAP_BASE_2 + offset;
-}
-
-static void
-mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-
- mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
-}
-
-static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
-{
- struct mt7615_dev *dev = dev_instance;
- u32 intr;
-
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
- return IRQ_NONE;
-
- intr &= dev->mt76.mmio.irqmask;
-
- if (intr & MT_INT_TX_DONE_ALL) {
- mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
- napi_schedule(&dev->mt76.tx_napi);
- }
-
- if (intr & MT_INT_RX_DONE(0)) {
- mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
- napi_schedule(&dev->mt76.napi[0]);
- }
-
- if (intr & MT_INT_RX_DONE(1)) {
- mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
- napi_schedule(&dev->mt76.napi[1]);
- }
-
- return IRQ_HANDLED;
-}
-
static int mt7615_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- static const struct mt76_driver_ops drv_ops = {
- /* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE,
- .survey_flags = SURVEY_INFO_TIME_TX |
- SURVEY_INFO_TIME_RX |
- SURVEY_INFO_TIME_BSS_RX,
- .tx_prepare_skb = mt7615_tx_prepare_skb,
- .tx_complete_skb = mt7615_tx_complete_skb,
- .rx_skb = mt7615_queue_rx_skb,
- .rx_poll_complete = mt7615_rx_poll_complete,
- .sta_ps = mt7615_sta_ps,
- .sta_add = mt7615_sta_add,
- .sta_assoc = mt7615_sta_assoc,
- .sta_remove = mt7615_sta_remove,
- .update_survey = mt7615_update_channel,
- };
- struct mt7615_dev *dev;
- struct mt76_dev *mdev;
int ret;
ret = pcim_enable_device(pdev);
@@ -104,31 +35,7 @@ static int mt7615_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7615_ops,
- &drv_ops);
- if (!mdev)
- return -ENOMEM;
-
- dev = container_of(mdev, struct mt7615_dev, mt76);
- mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
-
- mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
- (mt76_rr(dev, MT_HW_REV) & 0xff);
- dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
-
- ret = devm_request_irq(mdev->dev, pdev->irq, mt7615_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
- if (ret)
- goto error;
-
- ret = mt7615_register_device(dev);
- if (ret)
- goto error;
-
- return 0;
-error:
- ieee80211_free_hw(mt76_hw(dev));
- return ret;
+ return mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0], pdev->irq);
}
static void mt7615_pci_remove(struct pci_dev *pdev)
@@ -146,10 +53,7 @@ struct pci_driver mt7615_pci_driver = {
.remove = mt7615_pci_remove,
};
-module_pci_driver(mt7615_pci_driver);
-
MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table);
MODULE_FIRMWARE(MT7615_FIRMWARE_CR4);
MODULE_FIRMWARE(MT7615_FIRMWARE_N9);
MODULE_FIRMWARE(MT7615_ROM_PATCH);
-MODULE_LICENSE("Dual BSD/GPL");
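
The PCI front end is now pure glue: it discovers the BAR and IRQ and hands off to a bus-agnostic mt7615_mmio_probe() added elsewhere in this series (presumably a new mmio.c, not shown in this hunk). A sketch of that helper, reconstructed from the code removed above; only the bus-specific pieces (mapped registers, IRQ number) are passed in:

int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, int irq)
{
	static const struct mt76_driver_ops drv_ops = {
		/* the same ops table that was removed from pci.c above */
	};
	struct mt7615_dev *dev;
	struct mt76_dev *mdev;
	int ret;

	mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt7615_ops, &drv_ops);
	if (!mdev)
		return -ENOMEM;

	dev = container_of(mdev, struct mt7615_dev, mt76);
	mt76_mmio_init(&dev->mt76, mem_base);

	mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
		    (mt76_rr(dev, MT_HW_REV) & 0xff);
	dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);

	ret = devm_request_irq(pdev, irq, mt7615_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto error;

	ret = mt7615_register_device(dev);
	if (ret)
		goto error;

	return 0;

error:
	ieee80211_free_hw(mt76_hw(dev));
	return ret;
}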
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index 61a4aa9ac6e6..fe68f6b2cbf8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -8,6 +8,10 @@
#define MT_HW_CHIPID 0x1008
#define MT_TOP_STRAP_STA 0x1010
#define MT_TOP_3NSS BIT(24)
+
+#define MT_TOP_OFF_RSV 0x1128
+#define MT_TOP_OFF_RSV_FW_STATE GENMASK(18, 16)
+
#define MT_TOP_MISC2 0x1134
#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0)
@@ -31,14 +35,21 @@
#define MT_CFG_LPCR_HOST_FW_OWN BIT(0)
#define MT_CFG_LPCR_HOST_DRV_OWN BIT(1)
+#define MT_MCU_INT_EVENT MT_HIF(0x1f8)
+#define MT_MCU_INT_EVENT_PDMA_STOPPED BIT(0)
+#define MT_MCU_INT_EVENT_PDMA_INIT BIT(1)
+#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
+#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
+
#define MT_INT_SOURCE_CSR MT_HIF(0x200)
#define MT_INT_MASK_CSR MT_HIF(0x204)
#define MT_DELAY_INT_CFG MT_HIF(0x210)
#define MT_INT_RX_DONE(_n) BIT(_n)
#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
-#define MT_INT_TX_DONE_ALL GENMASK(7, 4)
+#define MT_INT_TX_DONE_ALL GENMASK(19, 4)
#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+#define MT_INT_MCU_CMD BIT(30)
#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
@@ -49,6 +60,7 @@
#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0 BIT(9)
+#define MT_WPDMA_GLO_CFG_BYPASS_TX_SCH BIT(9) /* MT7622 */
#define MT_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
#define MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21 GENMASK(23, 22)
@@ -58,6 +70,22 @@
#define MT_WPDMA_RST_IDX MT_HIF(0x20c)
+#define MT_WPDMA_MEM_RNG_ERR MT_HIF(0x224)
+
+#define MT_MCU_CMD MT_HIF(0x234)
+#define MT_MCU_CMD_CLEAR_FW_OWN BIT(0)
+#define MT_MCU_CMD_STOP_PDMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_PDMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_LMAC_ERROR BIT(24)
+#define MT_MCU_CMD_PSE_ERROR BIT(25)
+#define MT_MCU_CMD_PLE_ERROR BIT(26)
+#define MT_MCU_CMD_PDMA_ERROR BIT(27)
+#define MT_MCU_CMD_PCIE_ERROR BIT(28)
+#define MT_MCU_CMD_ERROR_MASK (GENMASK(5, 1) | GENMASK(28, 24))
+
#define MT_TX_RING_BASE MT_HIF(0x300)
#define MT_RX_RING_BASE MT_HIF(0x400)
@@ -81,36 +109,35 @@
#define MT_WF_PHY_BASE 0x10000
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
-#define MT_WF_PHY_WF2_RFCTRL0 MT_WF_PHY(0x1900)
+#define MT_WF_PHY_WF2_RFCTRL0(n) MT_WF_PHY(0x1900 + (n) * 0x400)
#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9)
-#define MT_WF_PHY_R0_B0_PHYMUX_5 MT_WF_PHY(0x0614)
+#define MT_WF_PHY_R0_PHYMUX_5(_phy) MT_WF_PHY(0x0614 + ((_phy) << 9))
-#define MT_WF_PHY_R0_B0_PHYCTRL_STS0 MT_WF_PHY(0x020c)
+#define MT_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x020c + ((_phy) << 9))
#define MT_WF_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16)
#define MT_WF_PHYCTRL_STAT_PD_CCK GENMASK(15, 0)
-#define MT_WF_PHY_R0_B0_PHYCTRL_STS5 MT_WF_PHY(0x0220)
+#define MT_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0220 + ((_phy) << 9))
#define MT_WF_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16)
#define MT_WF_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0)
-#define MT_WF_PHY_B0_MIN_PRI_PWR MT_WF_PHY(0x229c)
-#define MT_WF_PHY_B0_PD_OFDM_MASK GENMASK(28, 20)
-#define MT_WF_PHY_B0_PD_OFDM(v) ((v) << 20)
-#define MT_WF_PHY_B0_PD_BLK BIT(19)
+#define MT_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x084 : 0x229c)
+#define MT_WF_PHY_PD_OFDM_MASK(_phy) ((_phy) ? GENMASK(24, 16) : \
+ GENMASK(28, 20))
+#define MT_WF_PHY_PD_OFDM(_phy, v) ((v) << ((_phy) ? 16 : 20))
+#define MT_WF_PHY_PD_BLK(_phy) ((_phy) ? BIT(25) : BIT(19))
-#define MT_WF_PHY_B1_MIN_PRI_PWR MT_WF_PHY(0x084)
-#define MT_WF_PHY_B1_PD_OFDM_MASK GENMASK(24, 16)
-#define MT_WF_PHY_B1_PD_OFDM(v) ((v) << 16)
-#define MT_WF_PHY_B1_PD_BLK BIT(25)
+#define MT_WF_PHY_RXTD_BASE MT_WF_PHY(0x2200)
+#define MT_WF_PHY_RXTD(_n) (MT_WF_PHY_RXTD_BASE + ((_n) << 2))
-#define MT_WF_PHY_B0_RXTD_CCK_PD MT_WF_PHY(0x2310)
-#define MT_WF_PHY_B0_PD_CCK_MASK GENMASK(8, 1)
-#define MT_WF_PHY_B0_PD_CCK(v) ((v) << 1)
+#define MT_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2314 : 0x2310)
+#define MT_WF_PHY_PD_CCK_MASK(_phy) ((_phy) ? GENMASK(31, 24) : \
+ GENMASK(8, 1))
+#define MT_WF_PHY_PD_CCK(_phy, v) ((v) << ((_phy) ? 24 : 1))
-#define MT_WF_PHY_B1_RXTD_CCK_PD MT_WF_PHY(0x2314)
-#define MT_WF_PHY_B1_PD_CCK_MASK GENMASK(31, 24)
-#define MT_WF_PHY_B1_PD_CCK(v) ((v) << 24)
+#define MT_WF_PHY_RXTD2_BASE MT_WF_PHY(0x2a00)
+#define MT_WF_PHY_RXTD2(_n) (MT_WF_PHY_RXTD2_BASE + ((_n) << 2))
#define MT_WF_CFG_BASE 0x20200
#define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs))
@@ -131,8 +158,8 @@
#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19)
#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20)
-#define MT_AGG_ARUCR MT_WF_AGG(0x018)
-#define MT_AGG_ARDCR MT_WF_AGG(0x01c)
+#define MT_AGG_ARUCR(_band) MT_WF_AGG(0x018 + (_band) * 0x100)
+#define MT_AGG_ARDCR(_band) MT_WF_AGG(0x01c + (_band) * 0x100)
#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n))
#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \
MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
@@ -142,8 +169,7 @@
#define MT_AGG_ASRCR1 MT_WF_AGG(0x064)
#define MT_AGG_ASRCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(5, 0))
-#define MT_AGG_ACR0 MT_WF_AGG(0x070)
-#define MT_AGG_ACR1 MT_WF_AGG(0x170)
+#define MT_AGG_ACR(_band) MT_WF_AGG(0x070 + (_band) * 0x100)
#define MT_AGG_ACR_NO_BA_RULE BIT(0)
#define MT_AGG_ACR_NO_BA_AR_RULE BIT(1)
#define MT_AGG_ACR_PKT_TIME_EN BIT(2)
@@ -153,14 +179,33 @@
#define MT_AGG_SCR MT_WF_AGG(0x0fc)
#define MT_AGG_SCR_NLNAV_MID_PTEC_DIS BIT(3)
+#define MT_WF_ARB_BASE 0x20c00
+#define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs))
+
+#define MT_ARB_SCR MT_WF_ARB(0x080)
+#define MT_ARB_SCR_TX0_DISABLE BIT(8)
+#define MT_ARB_SCR_RX0_DISABLE BIT(9)
+#define MT_ARB_SCR_TX1_DISABLE BIT(10)
+#define MT_ARB_SCR_RX1_DISABLE BIT(11)
+
#define MT_WF_TMAC_BASE 0x21000
#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs))
-#define MT_TMAC_TRCR0 MT_WF_TMAC(0x09c)
-#define MT_TMAC_TRCR1 MT_WF_TMAC(0x070)
+#define MT_TMAC_CDTR MT_WF_TMAC(0x090)
+#define MT_TMAC_ODTR MT_WF_TMAC(0x094)
+#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+
+#define MT_TMAC_TRCR(_band) MT_WF_TMAC((_band) ? 0x070 : 0x09c)
#define MT_TMAC_TRCR_CCA_SEL GENMASK(31, 30)
#define MT_TMAC_TRCR_SEC_CCA_SEL GENMASK(29, 28)
+#define MT_TMAC_ICR(_band) MT_WF_TMAC((_band) ? 0x074 : 0x0a4)
+#define MT_IFS_EIFS GENMASK(8, 0)
+#define MT_IFS_RIFS GENMASK(14, 10)
+#define MT_IFS_SIFS GENMASK(22, 16)
+#define MT_IFS_SLOT GENMASK(30, 24)
+
#define MT_TMAC_CTCR0 MT_WF_TMAC(0x0f4)
#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
#define MT_TMAC_CTCR0_INS_DDLMT_DENSITY GENMASK(15, 12)
@@ -170,7 +215,7 @@
#define MT_WF_RMAC_BASE 0x21200
#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs))
-#define MT_WF_RFCR MT_WF_RMAC(0x000)
+#define MT_WF_RFCR(_band) MT_WF_RMAC((_band) ? 0x100 : 0x000)
#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
#define MT_WF_RFCR_DROP_VERSION BIT(3)
@@ -193,13 +238,15 @@
#define MT_WF_RFCR_DROP_NDPA BIT(20)
#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
-#define MT_WF_RFCR1 MT_WF_RMAC(0x004)
+#define MT_WF_RFCR1(_band) MT_WF_RMAC((_band) ? 0x104 : 0x004)
#define MT_WF_RFCR1_DROP_ACK BIT(4)
#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
#define MT_WF_RFCR1_DROP_BA BIT(6)
#define MT_WF_RFCR1_DROP_CFEND BIT(7)
#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+#define MT_CHFREQ(_band) MT_WF_RMAC((_band) ? 0x130 : 0x030)
+
#define MT_WF_RMAC_MIB_TIME0 MT_WF_RMAC(0x03c4)
#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
@@ -207,6 +254,7 @@
#define MT_WF_RMAC_MIB_AIRTIME0 MT_WF_RMAC(0x0380)
#define MT_WF_RMAC_MIB_TIME5 MT_WF_RMAC(0x03d8)
+#define MT_WF_RMAC_MIB_TIME6 MT_WF_RMAC(0x03dc)
#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0)
#define MT_WF_DMA_BASE 0x21800
@@ -216,8 +264,7 @@
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2)
#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
-#define MT_DMA_BN0RCFR0 MT_WF_DMA(0x070)
-#define MT_DMA_BN1RCFR0 MT_WF_DMA(0x0b0)
+#define MT_DMA_RCFR0(_band) MT_WF_DMA(0x070 + (_band) * 0x40)
#define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2)
#define MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR BIT(3)
#define MT_DMA_RCFR0_MCU_RX_CTL_BAR BIT(4)
@@ -294,9 +341,9 @@
#define MT_WF_MIB(ofs) (MT_WF_MIB_BASE + (ofs))
#define MT_MIB_M0_MISC_CR MT_WF_MIB(0x00c)
-#define MT_MIB_MB_SDR0(n) MT_WF_MIB(0x100 + ((n) << 4))
-#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
-#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR3(n) MT_WF_MIB(0x014 + ((n) << 9))
+#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
#define MT_MIB_SDR9(n) MT_WF_MIB(0x02c + ((n) << 9))
#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
@@ -309,8 +356,56 @@
#define MT_MIB_SDR37(n) MT_WF_MIB(0x09c + ((n) << 9))
#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(0x100 + ((_band) << 9) + \
+ ((n) << 4))
+#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
+#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(0x104 + ((_band) << 9) + \
+ ((n) << 4))
+#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
+
#define MT_TX_AGG_CNT(n) MT_WF_MIB(0xa8 + ((n) << 2))
+#define MT_DMASHDL_BASE 0x5000a000
+#define MT_DMASHDL_OPTIONAL 0x008
+#define MT_DMASHDL_PAGE 0x00c
+
+#define MT_DMASHDL_REFILL 0x010
+
+#define MT_DMASHDL_PKT_MAX_SIZE 0x01c
+#define MT_DMASHDL_PKT_MAX_SIZE_PLE GENMASK(11, 0)
+#define MT_DMASHDL_PKT_MAX_SIZE_PSE GENMASK(27, 16)
+
+#define MT_DMASHDL_GROUP_QUOTA(_n) (0x020 + ((_n) << 2))
+#define MT_DMASHDL_GROUP_QUOTA_MIN GENMASK(11, 0)
+#define MT_DMASHDL_GROUP_QUOTA_MAX GENMASK(27, 16)
+
+#define MT_DMASHDL_SCHED_SET0 0x0b0
+#define MT_DMASHDL_SCHED_SET1 0x0b4
+
+#define MT_DMASHDL_Q_MAP(_n) (0x0d0 + ((_n) << 2))
+#define MT_DMASHDL_Q_MAP_MASK GENMASK(3, 0)
+#define MT_DMASHDL_Q_MAP_SHIFT(_n) (4 * ((_n) % 8))
+
+#define MT_LED_BASE_PHYS 0x80024000
+#define MT_LED_PHYS(_n) (MT_LED_BASE_PHYS + (_n))
+
+#define MT_LED_CTRL MT_LED_PHYS(0x00)
+
+#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n)))
+#define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
+
+#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8))
+#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8))
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 0)
+
#define MT_EFUSE_BASE 0x81070000
#define MT_EFUSE_BASE_CTRL 0x000
#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
@@ -328,4 +423,8 @@
#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4))
#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4))
+/* INFRACFG host register range on MT7622 */
+#define MT_INFRACFG_MISC 0x700
+#define MT_INFRACFG_MISC_AP2CONN_WAKE BIT(1)
+
#endif
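
The header rework above folds the duplicated _B0/_B1 register definitions into macros taking a band (or phy) index: RMAC and AGG registers use a 0x100 stride, PHY and MIB blocks a 0x200 stride ((_phy) << 9), and a few PHY registers keep per-band ternary lookups. A hypothetical caller, to show the intended usage:

static void example_toggle_fcs_drop(struct mt7615_dev *dev, bool ext_phy)
{
	/* band 0: MT_WF_RMAC_BASE + 0x000, band 1: + 0x100 */
	mt76_set(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_FCSFAIL);

	/* MIB counters use the 0x200 stride: 0x014 + (band << 9) */
	(void)mt76_rr(dev, MT_MIB_SDR3(ext_phy));
}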
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
new file mode 100644
index 000000000000..07ec9ec282f5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include "mt7615.h"
+
+int mt7622_wmac_init(struct mt7615_dev *dev)
+{
+ struct device_node *np = dev->mt76.dev->of_node;
+
+ if (!is_mt7622(&dev->mt76))
+ return 0;
+
+ dev->infracfg = syscon_regmap_lookup_by_phandle(np, "mediatek,infracfg");
+ if (IS_ERR(dev->infracfg)) {
+ dev_err(dev->mt76.dev, "Cannot find infracfg controller\n");
+ return PTR_ERR(dev->infracfg);
+ }
+
+ return 0;
+}
+
+static int mt7622_wmac_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *mem_base;
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get device IRQ\n");
+ return irq;
+ }
+
+ mem_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mem_base)) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return PTR_ERR(mem_base);
+ }
+
+ return mt7615_mmio_probe(&pdev->dev, mem_base, irq);
+}
+
+static int mt7622_wmac_remove(struct platform_device *pdev)
+{
+ struct mt7615_dev *dev = platform_get_drvdata(pdev);
+
+ mt7615_unregister_device(dev);
+
+ return 0;
+}
+
+static const struct of_device_id mt7622_wmac_of_match[] = {
+ { .compatible = "mediatek,mt7622-wmac" },
+ {},
+};
+
+struct platform_driver mt7622_wmac_driver = {
+ .driver = {
+ .name = "mt7622-wmac",
+ .of_match_table = mt7622_wmac_of_match,
+ },
+ .probe = mt7622_wmac_probe,
+ .remove = mt7622_wmac_remove,
+};
+
+MODULE_FIRMWARE(MT7622_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7622_ROM_PATCH);
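
Note that soc.c registers nothing itself: like pci.c above (which lost its module_pci_driver() and MODULE_LICENSE() lines), it only exports the driver struct. A plausible shape of the shared module init that ties the two front ends together; the actual registration site is elsewhere in this series, and Kconfig gating is omitted here:

static int __init mt7615_init(void)
{
	int ret;

	ret = pci_register_driver(&mt7615_pci_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&mt7622_wmac_driver);
	if (ret)
		pci_unregister_driver(&mt7615_pci_driver);

	return ret;
}

static void __exit mt7615_exit(void)
{
	platform_driver_unregister(&mt7622_wmac_driver);
	pci_unregister_driver(&mt7615_pci_driver);
}

module_init(mt7615_init);
module_exit(mt7615_exit);
MODULE_LICENSE("Dual BSD/GPL");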
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/trace.c b/drivers/net/wireless/mediatek/mt76/mt7615/trace.c
new file mode 100644
index 000000000000..6c02d5aff68f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt7615_trace.h"
+
+#endif
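
Defining CREATE_TRACE_POINTS before including the trace header is what makes <trace/define_trace.h> expand the TRACE_EVENT() macros into real tracepoint definitions; this must happen in exactly one compilation unit, and the __CHECKER__ guard skips the expansion under sparse. For reference, the companion header follows the standard layout; below is a minimal hypothetical event (names illustrative, not the actual mt7615_trace.h contents):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM mt7615

#if !defined(__MT7615_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __MT7615_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(example_reg_event,
	TP_PROTO(u32 reg, u32 val),
	TP_ARGS(reg, val),
	TP_STRUCT__entry(
		__field(u32, reg)
		__field(u32, val)
	),
	TP_fast_assign(
		__entry->reg = reg;
		__entry->val = val;
	),
	TP_printk("reg 0x%08x val 0x%08x", __entry->reg, __entry->val)
);

#endif

/* followed by the TRACE_INCLUDE_PATH/FILE boilerplate shown at the top
 * of this patch, which must stay outside the include guard
 */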
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index d1405528b504..9087607b621e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -109,7 +109,7 @@ static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
s8 val, lna_5g[3], lna_2g;
u16 rssi_offset;
@@ -129,7 +129,7 @@ void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
u8 val;
if (chandef->width == NL80211_CHAN_WIDTH_80) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 388b54cded1b..57f8d56737eb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -264,12 +264,12 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
if (dev->mt76.cap.has_5ghz) {
/* overwrite unsupported features */
- mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband);
- mt76x0_init_txpower(dev, &dev->mt76.sband_5g.sband);
+ mt76x0_vht_cap_mask(&dev->mphy.sband_5g.sband);
+ mt76x0_init_txpower(dev, &dev->mphy.sband_5g.sband);
}
if (dev->mt76.cap.has_2ghz)
- mt76x0_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt76x0_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt76x02_init_debugfs(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index b2ccf50512dc..700ae9c12f1d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -16,7 +16,7 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
if (mt76_is_mmio(&dev->mt76))
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt76x0_phy_set_channel(dev, chandef);
mt76x02_mac_cc_reset(dev);
@@ -28,7 +28,7 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
}
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
@@ -44,9 +44,9 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->mt76.txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = hw->conf.power_level * 2;
- if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
mt76x0_phy_set_txpower(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index e2974e0ae1fc..0b520ae08d01 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -20,7 +20,7 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -47,7 +47,7 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt76x0e_stop_hw(dev);
}
@@ -67,6 +67,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76_sw_scan,
@@ -124,7 +125,7 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
if (err < 0)
return err;
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
return 0;
}
@@ -195,7 +196,7 @@ error:
static void mt76x0e_cleanup(struct mt76x02_dev *dev)
{
- clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76x0_chip_onoff(dev, false, false);
mt76x0e_stop_hw(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
index 038187b390ce..007c762c6db1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
@@ -126,7 +126,7 @@ int mt76x0e_mcu_init(struct mt76x02_dev *dev)
if (err < 0)
return err;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 2ecd45f8af90..b56397c05218 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -23,7 +23,7 @@ mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
int ret = 0;
u8 bank, reg;
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return -ENODEV;
bank = MT_RF_BANK(offset);
@@ -62,7 +62,7 @@ static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
u32 val;
u8 bank, reg;
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return -ENODEV;
bank = MT_RF_BANK(offset);
@@ -109,7 +109,7 @@ mt76x0_rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
};
WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
- &dev->mt76.state));
+ &dev->mphy.state));
return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
} else {
return mt76x0_rf_csr_wr(dev, offset, val);
@@ -127,7 +127,7 @@ static int mt76x0_rf_rr(struct mt76x02_dev *dev, u32 offset)
};
WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
- &dev->mt76.state));
+ &dev->mphy.state));
ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
val = pair.value;
} else {
@@ -502,7 +502,7 @@ mt76x0_phy_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
static void mt76x0_phy_tssi_dc_calibrate(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u32 val;
if (chan->band == NL80211_BAND_5GHZ)
@@ -543,7 +543,7 @@ static int
mt76x0_phy_tssi_adc_calibrate(struct mt76x02_dev *dev, s16 *ltssi,
u8 *info)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u32 val;
val = (chan->band == NL80211_BAND_5GHZ) ? 0x80055 : 0x80050;
@@ -696,7 +696,7 @@ mt76x0_phy_get_delta_power(struct mt76x02_dev *dev, u8 tx_mode,
s8 target_power, s8 target_pa_power,
s16 ltssi)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int tssi_target = target_power << 12, tssi_slope;
int tssi_offset, tssi_db, ret;
u32 data;
@@ -844,12 +844,12 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
struct mt76_rate_power *t = &dev->mt76.rate_power;
s8 info;
- mt76x0_get_tx_power_per_rate(dev, dev->mt76.chandef.chan, t);
- mt76x0_get_power_info(dev, dev->mt76.chandef.chan, &info);
+ mt76x0_get_tx_power_per_rate(dev, dev->mphy.chandef.chan, t);
+ mt76x0_get_power_info(dev, dev->mphy.chandef.chan, &info);
mt76x02_add_rate_power_offset(t, info);
- mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
- dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
+ mt76x02_limit_rate_power(t, dev->txpower_conf);
+ dev->mphy.txpower_cur = mt76x02_get_max_rate_power(t);
mt76x02_add_rate_power_offset(t, -info);
dev->target_power = info;
@@ -858,7 +858,7 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int is_5ghz = (chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
u32 val, tx_alc, reg_val;
@@ -933,7 +933,7 @@ void mt76x0_phy_set_channel(struct mt76x02_dev *dev,
FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
};
- bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
int ch_group_index, freq, freq1;
u8 channel;
u32 val;
@@ -1037,7 +1037,7 @@ static void mt76x0_phy_temp_sensor(struct mt76x02_dev *dev)
if (abs(val - dev->cal.temp_vco) > 20) {
mt76x02_mcu_calibrate(dev, MCU_CAL_VCO,
- dev->mt76.chandef.chan->hw_value);
+ dev->mphy.chandef.chan->hw_value);
dev->cal.temp_vco = val;
}
if (abs(val - dev->cal.temp) > 30) {
@@ -1057,7 +1057,7 @@ static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, gain);
- if ((dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR) &&
+ if ((dev->mphy.chandef.chan->flags & IEEE80211_CHAN_RADAR) &&
!is_mt7630(dev))
mt76x02_phy_dfs_adjust_agc(dev);
}
@@ -1069,7 +1069,7 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
u8 gain_delta;
int low_gain;
- dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
if (!dev->cal.avg_rssi_all)
dev->cal.avg_rssi_all = -75;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 65ba9fc6ea0b..5535b9c0632f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -71,7 +71,7 @@ static void mt76x0_init_usb_dma(struct mt76x02_dev *dev)
static void mt76x0u_cleanup(struct mt76x02_dev *dev)
{
- clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
mt76x0_chip_onoff(dev, false, false);
mt76u_queues_deinit(&dev->mt76);
}
@@ -80,13 +80,13 @@ static void mt76x0u_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mt76.mac_work);
mt76u_stop_tx(&dev->mt76);
mt76x02u_exit_beacon_config(dev);
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return;
if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
@@ -112,7 +112,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -126,6 +126,7 @@ static const struct ieee80211_ops mt76x0u_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76_sw_scan,
@@ -172,8 +173,14 @@ static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
static int mt76x0u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = dev->mt76.hw;
+ struct mt76_usb *usb = &dev->mt76.usb;
int err;
+ usb->mcu.data = devm_kmalloc(dev->mt76.dev, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (!usb->mcu.data)
+ return -ENOMEM;
+
err = mt76u_alloc_queues(&dev->mt76);
if (err < 0)
goto out_err;
@@ -182,17 +189,13 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto out_err;
+ /* check hw sg support in order to enable AMSDU */
+ hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1;
err = mt76x0_register_device(dev);
if (err < 0)
goto out_err;
- /* check hw sg support in order to enable AMSDU */
- if (dev->mt76.usb.sg_en)
- hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
- else
- hw->max_tx_fragments = 1;
-
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
return 0;
@@ -240,7 +243,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, dev);
mt76x02u_init_mcu(mdev);
- ret = mt76u_init(mdev, usb_intf);
+ ret = mt76u_init(mdev, usb_intf, false);
if (ret)
goto err;
@@ -283,7 +286,7 @@ err:
static void mt76x0_disconnect(struct usb_interface *usb_intf)
{
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
- bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
if (!initialized)
return;
@@ -304,7 +307,7 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
mt76u_stop_rx(&dev->mt76);
- clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
mt76x0_chip_onoff(dev, false, false);
return 0;
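
The MCU response bounce buffer is now allocated once at registration time with devm_kmalloc(), which ties its lifetime to the underlying struct device: no matching kfree() is needed on the error paths or in the disconnect handler. Sketch of the pattern in isolation (helper name is illustrative):

static int alloc_mcu_resp_buffer(struct mt76_dev *mdev)
{
	void *buf = devm_kmalloc(mdev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	mdev->usb.mcu.data = buf;	/* released with the device, not here */
	return 0;
}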
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
index 888a930a5e08..45502fd4693f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -168,7 +168,7 @@ int mt76x0u_mcu_init(struct mt76x02_dev *dev)
if (ret < 0)
return ret;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 0ca0bbfe8769..23040c193ca5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -70,18 +70,23 @@ struct mt76x02_beacon_ops {
(dev)->beacon_ops->pre_tbtt_enable(dev, enable)
struct mt76x02_dev {
- struct mt76_dev mt76; /* must be first */
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
struct mac_address macaddr_list[8];
struct mutex phy_mutex;
u16 vif_mask;
+ u16 chainmask;
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
spinlock_t txstatus_fifo_lock;
u32 tx_airtime;
+ u32 ampdu_ref;
struct sk_buff *rx_head;
@@ -93,8 +98,7 @@ struct mt76x02_dev {
const struct mt76x02_beacon_ops *beacon_ops;
- struct sk_buff *beacons[8];
- u8 beacon_data_mask;
+ u8 beacon_data_count;
u8 tbtt_count;
@@ -104,13 +108,14 @@ struct mt76x02_dev {
struct mt76x02_calibration cal;
+ int txpower_conf;
s8 target_power;
s8 target_power_delta[2];
bool enable_tpc;
bool no_2ghz;
- u8 coverage_class;
+ s16 coverage_class;
u8 slottime;
struct mt76x02_dfs_pattern_detector dfs_pd;
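
The union is the core trick of this series: struct mt76_dev itself begins with an embedded struct mt76_phy (see the mdev->phy and dev->phy accesses later in this patch), so overlaying the two types at offset 0 lets single-phy drivers write dev->mphy.state instead of dev->mt76.phy.state while container_of() conversions keep working. A standalone illustration of the aliasing, using simplified stand-in types:

#include <assert.h>

struct phy { unsigned long state; };
struct core_dev { struct phy phy; /* must be first */ int other; };

struct drv_dev {
	union { /* must be first */
		struct core_dev dev;
		struct phy mphy;	/* alias for dev.phy */
	};
};

int main(void)
{
	struct drv_dev d = { .dev = { .other = 0 } };

	d.mphy.state = 1;
	assert(d.dev.phy.state == 1);	/* same storage */
	return 0;
}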
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
index 4209209ac940..5d034cec191b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -26,78 +26,40 @@ static int
mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
{
int beacon_len = dev->beacon_ops->slot_size;
- struct mt76x02_txwi txwi;
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
return -ENOSPC;
- mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
-
- mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
- offset += sizeof(txwi);
-
- mt76_wr_copy(dev, offset, skb->data, skb->len);
- return 0;
-}
-
-static int
-__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
- struct sk_buff *skb)
-{
- int beacon_len = dev->beacon_ops->slot_size;
- int beacon_addr = MT_BEACON_BASE + (beacon_len * bcn_idx);
- int ret = 0;
- int i;
-
- /* Prevent corrupt transmissions during update */
- mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+ /* USB devices already reserve enough skb headroom for the txwi. This
+ * helps to avoid slow copies over USB.
+ */
+ if (mt76_is_usb(&dev->mt76)) {
+ struct mt76x02_txwi *txwi;
- if (skb) {
- ret = mt76x02_write_beacon(dev, beacon_addr, skb);
- if (!ret)
- dev->beacon_data_mask |= BIT(bcn_idx);
+ txwi = (struct mt76x02_txwi *)(skb->data - sizeof(*txwi));
+ mt76x02_mac_write_txwi(dev, txwi, skb, NULL, NULL, skb->len);
+ skb_push(skb, sizeof(*txwi));
} else {
- dev->beacon_data_mask &= ~BIT(bcn_idx);
- for (i = 0; i < beacon_len; i += 4)
- mt76_wr(dev, beacon_addr + i, 0);
- }
+ struct mt76x02_txwi txwi;
- mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+ mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
+ mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+ offset += sizeof(txwi);
+ }
- return ret;
+ mt76_wr_copy(dev, offset, skb->data, skb->len);
+ return 0;
}
-int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
- struct sk_buff *skb)
+void mt76x02_mac_set_beacon(struct mt76x02_dev *dev,
+ struct sk_buff *skb)
{
- bool force_update = false;
- int bcn_idx = 0;
- int i;
+ int bcn_len = dev->beacon_ops->slot_size;
+ int bcn_addr = MT_BEACON_BASE + (bcn_len * dev->beacon_data_count);
- for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
- if (vif_idx == i) {
- force_update = !!dev->beacons[i] ^ !!skb;
- dev_kfree_skb(dev->beacons[i]);
- dev->beacons[i] = skb;
- __mt76x02_mac_set_beacon(dev, bcn_idx, skb);
- } else if (force_update && dev->beacons[i]) {
- __mt76x02_mac_set_beacon(dev, bcn_idx,
- dev->beacons[i]);
- }
-
- bcn_idx += !!dev->beacons[i];
- }
-
- for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
- if (!(dev->beacon_data_mask & BIT(i)))
- break;
-
- __mt76x02_mac_set_beacon(dev, i, NULL);
- }
-
- mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
- bcn_idx - 1);
- return 0;
+ if (!mt76x02_write_beacon(dev, bcn_addr, skb))
+ dev->beacon_data_count++;
+ dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon);
@@ -116,7 +78,6 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
dev->mt76.beacon_mask |= BIT(mvif->idx);
} else {
dev->mt76.beacon_mask &= ~BIT(mvif->idx);
- mt76x02_mac_set_beacon(dev, mvif->idx, NULL);
}
if (!!old_mask == !!dev->mt76.beacon_mask)
@@ -182,7 +143,7 @@ mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!skb)
return;
- mt76x02_mac_set_beacon(dev, mvif->idx, skb);
+ mt76x02_mac_set_beacon(dev, skb);
}
EXPORT_SYMBOL_GPL(mt76x02_update_beacon_iter);
@@ -241,17 +202,11 @@ EXPORT_SYMBOL_GPL(mt76x02_enqueue_buffered_bc);
void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
{
- int i;
-
mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_BEACON_TX));
mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
-
- for (i = 0; i < 8; i++)
- mt76x02_mac_set_beacon(dev, i, NULL);
-
mt76x02_set_beacon_offsets(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
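
The USB branch above avoids a bounce buffer by building the txwi directly in the headroom that mt76's USB path already reserves, then pushing it into the data area; the MMIO branch keeps the stack copy plus mt76_wr_copy(). Generic shape of the headroom trick (kernel-style sketch, helper name illustrative):

#include <linux/skbuff.h>

static int push_hw_header(struct sk_buff *skb, size_t hdr_len,
			  void (*build)(void *hdr, struct sk_buff *skb))
{
	void *hdr;

	if (skb_headroom(skb) < hdr_len)
		return -ENOMEM;		/* caller must have reserved it */

	hdr = skb->data - hdr_len;	/* write the header in place... */
	build(hdr, skb);
	skb_push(skb, hdr_len);		/* ...then expose it as data */
	return 0;
}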
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index 5dec33ed8527..ff6a9e4daac0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -307,8 +307,8 @@ static bool mt76x02_dfs_check_hw_pulse(struct mt76x02_dev *dev,
pulse->period <= 100100);
break;
case NL80211_DFS_JP:
- if (dev->mt76.chandef.chan->center_freq >= 5250 &&
- dev->mt76.chandef.chan->center_freq <= 5350) {
+ if (dev->mphy.chandef.chan->center_freq >= 5250 &&
+ dev->mphy.chandef.chan->center_freq <= 5350) {
/* JPW53 */
if (pulse->w1 <= 130)
ret = (pulse->period >= 28360 &&
@@ -616,7 +616,7 @@ static void mt76x02_dfs_tasklet(unsigned long arg)
u32 engine_mask;
int i;
- if (test_bit(MT76_SCANNING, &dev->mt76.state))
+ if (test_bit(MT76_SCANNING, &dev->mphy.state))
goto out;
if (time_is_before_jiffies(dfs_pd->last_sw_check +
@@ -702,7 +702,7 @@ static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
u8 i, shift;
u32 data;
- switch (dev->mt76.chandef.width) {
+ switch (dev->mphy.chandef.width) {
case NL80211_CHAN_WIDTH_40:
shift = MT_DFS_NUM_ENGINES;
break;
@@ -722,8 +722,8 @@ static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
radar_specs = &etsi_radar_specs[shift];
break;
case NL80211_DFS_JP:
- if (dev->mt76.chandef.chan->center_freq >= 5250 &&
- dev->mt76.chandef.chan->center_freq <= 5350)
+ if (dev->mphy.chandef.chan->center_freq >= 5250 &&
+ dev->mphy.chandef.chan->center_freq <= 5350)
radar_specs = &jp_w53_radar_specs[shift];
else
radar_specs = &jp_w56_radar_specs[shift];
@@ -822,7 +822,7 @@ EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
dev->mt76.region != NL80211_DFS_UNSET) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 4460548f346a..8b072277ea10 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -6,6 +6,7 @@
#include "mt76x02.h"
#include "mt76x02_trace.h"
+#include "trace.h"
void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
{
@@ -200,7 +201,7 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
bw = 1;
} else {
const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
+ int band = dev->mphy.chandef.chan->band;
u16 val;
r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
@@ -344,7 +345,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
u16 txwi_flags = 0;
u8 nss;
s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;
+ u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf;
memset(txwi, 0, sizeof(*txwi));
@@ -386,7 +387,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
}
- txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
+ txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
max_txpwr_adj);
txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
@@ -487,17 +488,17 @@ mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
first_rate |= st->pktid & MT_PKTID_RATE;
mt76x02_mac_process_tx_rate(&rate[0], first_rate,
- dev->mt76.chandef.chan->band);
+ dev->mphy.chandef.chan->band);
} else if (rate[0].idx < 0) {
if (!msta)
return;
mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
- dev->mt76.chandef.chan->band);
+ dev->mphy.chandef.chan->band);
}
mt76x02_mac_process_tx_rate(&last_rate, st->rate,
- dev->mt76.chandef.chan->band);
+ dev->mphy.chandef.chan->band);
for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
retry--;
@@ -630,7 +631,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
if (!len)
goto out;
- duration = mt76_calc_tx_airtime(&dev->mt76, &info, len);
+ duration = ieee80211_calc_tx_airtime(mt76_hw(dev), &info, len);
spin_lock_bh(&dev->mt76.cc_lock);
dev->tx_airtime += duration;
@@ -679,7 +680,7 @@ mt76x02_mac_process_rate(struct mt76x02_dev *dev,
status->rate_idx = idx;
break;
case MT_PHY_TYPE_VHT: {
- u8 n_rxstream = dev->mt76.chainmask & 0xf;
+ u8 n_rxstream = dev->chainmask & 0xf;
status->encoding = RX_ENC_VHT;
status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
@@ -741,6 +742,8 @@ void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
get_unaligned_le16(dev->mt76.macaddr + 4) |
FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+ /* enable 7 additional beacon slots and control them with bypass mask */
+ mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, 7);
for (i = 0; i < 16; i++)
mt76x02_mac_set_bssid(dev, i, null_addr);
@@ -769,13 +772,13 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
u16 rate = le16_to_cpu(rxwi->rate);
u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
- int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
+ int pad_len = 0, nstreams = dev->chainmask & 0xf;
s8 signal;
u8 pn_len;
u8 wcid;
int len;
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
return -EINVAL;
if (rxinfo & MT_RXINFO_L2PAD)
@@ -824,7 +827,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
if (rxinfo & MT_RXINFO_AMPDU) {
status->flag |= RX_FLAG_AMPDU_DETAILS;
- status->ampdu_ref = dev->mt76.ampdu_ref;
+ status->ampdu_ref = dev->ampdu_ref;
/*
* When receiving an A-MPDU subframe and RSSI info is not valid,
@@ -832,8 +835,8 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
* are coming. The last one will have valid RSSI info
*/
if (rxinfo & MT_RXINFO_RSSI) {
- if (!++dev->mt76.ampdu_ref)
- dev->mt76.ampdu_ref++;
+ if (!++dev->ampdu_ref)
+ dev->ampdu_ref++;
}
}
@@ -853,8 +856,8 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
signal = max_t(s8, signal, status->chain_signal[1]);
}
status->signal = signal;
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
+ status->freq = dev->mphy.chandef.chan->center_freq;
+ status->band = dev->mphy.chandef.chan->band;
status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
@@ -868,7 +871,7 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
u8 update = 1;
bool ret;
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
return;
trace_mac_txstat_poll(dev);
@@ -908,7 +911,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
txwi = (struct mt76x02_txwi *)txwi_ptr;
- trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+ trace_mac_txdone(mdev, txwi->wcid, txwi->pktid);
mt76_tx_complete_skb(mdev, e->skb);
}
@@ -1018,7 +1021,7 @@ void mt76x02_update_channel(struct mt76_dev *mdev)
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76_channel_state *state;
- state = mdev->chan_state;
+ state = mdev->phy.chan_state;
state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
spin_lock_bh(&dev->mt76.cc_lock);
@@ -1074,7 +1077,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev)
dev->ed_silent = 0;
if (dev->ed_monitor) {
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
@@ -1184,7 +1187,7 @@ void mt76x02_mac_work(struct work_struct *work)
void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
{
- dev->mt76.survey_time = ktime_get_boottime();
+ dev->mphy.survey_time = ktime_get_boottime();
mt76_wr(dev, MT_CH_TIME_CFG,
MT_CH_TIME_CFG_TIMER_EN |
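
dev->ampdu_ref above and the MCU msg_seq touched later in this series share an idiom: 0 doubles as "no value", so the counter skips it when wrapping. Standalone form:

#include <stdint.h>

/* 4-bit sequence counter that never yields 0 */
static uint8_t next_nonzero_seq(uint8_t *counter)
{
	uint8_t seq = ++(*counter) & 0xf;

	if (!seq)
		seq = ++(*counter) & 0xf;
	return seq;
}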
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 7d946aa77182..c70d17b2290c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -152,7 +152,7 @@ static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
int i;
for (i = 0; i < 500; i++) {
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
return false;
switch (dev->bus->rr(dev, MAC_CSR0)) {
@@ -201,8 +201,7 @@ void mt76x02_mac_work(struct work_struct *work);
void mt76x02_mac_cc_reset(struct mt76x02_dev *dev);
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
-int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
- struct sk_buff *skb);
+void mt76x02_mac_set_beacon(struct mt76x02_dev *dev, struct sk_buff *skb);
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
struct ieee80211_vif *vif, bool enable);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index 6274b6a24b07..5664749ad6c1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -24,11 +24,11 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
if (!skb)
return -ENOMEM;
- mutex_lock(&mdev->mmio.mcu.mutex);
+ mutex_lock(&mdev->mcu.mutex);
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
if (!seq)
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
tx_info = MT_MCU_MSG_TYPE_CMD |
FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
@@ -65,7 +65,7 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
}
out:
- mutex_unlock(&mdev->mmio.mcu.mutex);
+ mutex_unlock(&mdev->mcu.mutex);
return ret;
}
@@ -141,7 +141,7 @@ int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
usleep_range(20000, 30000);
- while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
+ while ((skb = skb_dequeue(&dev->mt76.mcu.res_q)) != NULL)
dev_kfree_skb(skb);
return 0;
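
The MCU bookkeeping moves out of the bus-specific mmio/usb containers into struct mt76_dev itself, so MMIO and USB transports share one lock, one sequence space and one response queue. A plausible shape of the shared struct, with fields inferred purely from the accesses in this series (mdev->mcu.mutex, .msg_seq, .res_q):

struct mt76_mcu {
	struct mutex mutex;
	u32 msg_seq;
	struct sk_buff_head res_q;
};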
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 4e2371c926d8..c7f028e73b6b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -9,7 +9,7 @@
#include "mt76x02.h"
#include "mt76x02_mcu.h"
-#include "mt76x02_trace.h"
+#include "trace.h"
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
@@ -24,10 +24,17 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
mt76x02_resync_beacon_timer(dev);
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ dev->beacon_data_count = 0;
+
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_update_beacon_iter, dev);
+ mt76_wr(dev, MT_BCN_BYPASS_MASK,
+ 0xff00 | ~(0xff00 >> dev->beacon_data_count));
+
mt76_csa_check(&dev->mt76);
if (dev->mt76.csa_complete)
@@ -151,7 +158,7 @@ static void mt76x02_tx_tasklet(unsigned long data)
mt76x02_mac_poll_tx_status(dev, false);
mt76x02_process_tx_status_fifo(dev);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
@@ -261,10 +268,10 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
- trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
intr &= dev->mt76.mmio.irqmask;
@@ -402,7 +409,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
lockdep_assert_held(&dev->mt76.mutex);
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
rcu_read_lock();
ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
@@ -420,6 +427,8 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
if (!wcid)
continue;
+ rcu_assign_pointer(dev->mt76.wcid[i], NULL);
+
priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
@@ -441,7 +450,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
int i;
ieee80211_stop_queues(dev->mt76.hw);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->mt76.tx_tasklet);
@@ -476,7 +485,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
if (restart)
mt76_mcu_restart(dev);
- for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
+ for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
@@ -496,7 +505,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mutex_unlock(&dev->mt76.mutex);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
tasklet_enable(&dev->mt76.tx_tasklet);
napi_enable(&dev->mt76.tx_napi);
@@ -514,7 +523,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
ieee80211_restart_hw(dev->mt76.hw);
} else {
ieee80211_wake_queues(dev->mt76.hw);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
}
@@ -535,9 +544,9 @@ static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
restart:
mt76x02_watchdog_reset(dev);
- mutex_lock(&dev->mt76.mmio.mcu.mutex);
+ mutex_lock(&dev->mt76.mcu.mutex);
dev->mcu_timeout = 0;
- mutex_unlock(&dev->mt76.mmio.mcu.mutex);
+ mutex_unlock(&dev->mt76.mcu.mutex);
dev->tx_hang_reset++;
dev->tx_hang_check = 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index d7334267b530..aaadc15ea83c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -16,7 +16,7 @@ void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
val = mt76_rr(dev, MT_BBP(AGC, 0));
val &= ~BIT(4);
- switch (dev->mt76.chainmask & 0xf) {
+ switch (dev->chainmask & 0xf) {
case 2:
val |= BIT(3);
break;
@@ -35,7 +35,7 @@ void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
{
int txpath;
- txpath = (dev->mt76.chainmask >> 8) & 0xf;
+ txpath = (dev->chainmask >> 8) & 0xf;
switch (txpath) {
case 2:
mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
index fc2e41006a0d..1def25bf735a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -11,7 +11,7 @@
static inline int
mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
{
- switch (dev->mt76.chandef.width) {
+ switch (dev->mphy.chandef.width) {
case NL80211_CHAN_WIDTH_80:
return -62;
case NL80211_CHAN_WIDTH_40:
@@ -24,7 +24,7 @@ mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
static inline int
mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
{
- switch (dev->mt76.chandef.width) {
+ switch (dev->mphy.chandef.width) {
case NL80211_CHAN_WIDTH_80:
return -76;
case NL80211_CHAN_WIDTH_40:
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index 21c0f351fa09..3e722276b5c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -235,15 +235,9 @@
#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n))
#define MT_LED_S1_BASE 0x0780
#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n))
-#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
-#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
- MT_LED_STATUS_OFF_MASK)
-#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
-#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
- MT_LED_STATUS_ON_MASK)
-#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8)
-#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
- MT_LED_STATUS_DURATION_MASK)
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 8)
#define MT_FCE_PSE_CTRL 0x0800
#define MT_FCE_PARAMETERS 0x0804
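
The open-coded shift-and-mask macros above are replaced by bare GENMASK() fields composed with FIELD_PREP() at the call site (see mt76x02_led_set_config() below); FIELD_PREP() derives the shift from the mask, so each field needs one definition instead of three. Equivalence check in isolation:

#include <linux/bitfield.h>
#include <linux/bug.h>

static u32 led_on_field(u8 v)
{
	/* old style: (v << __ffs(MASK)) & MASK, open-coded per field */
	u32 old_val = ((u32)v << 16) & GENMASK(23, 16);
	/* new style: the shift is implied by the mask */
	u32 new_val = FIELD_PREP(GENMASK(23, 16), v);

	WARN_ON(old_val != new_val);	/* e.g. v=0x40 yields 0x00400000 */
	return new_val;
}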
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
index 61ecaf0fe065..6a98092e996b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
@@ -20,7 +20,6 @@
#define DEV_PR_ARG __entry->wiphy_name
#define TXID_ENTRY __field(u8, wcid) __field(u8, pktid)
-#define TXID_ASSIGN __entry->wcid = wcid; __entry->pktid = pktid
#define TXID_PR_FMT " [%d:%d]"
#define TXID_PR_ARG __entry->wcid, __entry->pktid
@@ -36,28 +35,6 @@ DECLARE_EVENT_CLASS(dev_evt,
TP_printk(DEV_PR_FMT, DEV_PR_ARG)
);
-DECLARE_EVENT_CLASS(dev_txid_evt,
- TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
- TP_ARGS(dev, wcid, pktid),
- TP_STRUCT__entry(
- DEV_ENTRY
- TXID_ENTRY
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- TXID_ASSIGN;
- ),
- TP_printk(
- DEV_PR_FMT TXID_PR_FMT,
- DEV_PR_ARG, TXID_PR_ARG
- )
-);
-
-DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
- TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
- TP_ARGS(dev, wcid, pktid)
-);
-
DEFINE_EVENT(dev_evt, mac_txstat_poll,
TP_PROTO(struct mt76x02_dev *dev),
TP_ARGS(dev)
@@ -100,29 +77,6 @@ TRACE_EVENT(mac_txstat_fetch,
)
);
-TRACE_EVENT(dev_irq,
- TP_PROTO(struct mt76x02_dev *dev, u32 val, u32 mask),
-
- TP_ARGS(dev, val, mask),
-
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, val)
- __field(u32, mask)
- ),
-
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->val = val;
- __entry->mask = mask;
- ),
-
- TP_printk(
- DEV_PR_FMT " %08x & %08x",
- DEV_PR_ARG, __entry->val, __entry->mask
- )
-);
-
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index 13825f642087..96fdf423a348 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -28,7 +28,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
wcid = &mvif->group_wcid;
}
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
+ mt76_tx(&dev->mphy, control->sta, wcid, skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx);
@@ -39,7 +39,6 @@ void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
void *rxwi = skb->data;
if (q == MT_RXQ_MCU) {
- /* this is used just by mmio code */
mt76_mcu_rx_event(&dev->mt76, skb);
return;
}
@@ -74,7 +73,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
} else {
- enum nl80211_band band = dev->mt76.chandef.chan->band;
+ enum nl80211_band band = dev->mphy.chandef.chan->band;
if (band == NL80211_BAND_2GHZ) {
const struct ieee80211_rate *r;
@@ -96,7 +95,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
{
- txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
+ txpwr = min_t(s8, txpwr, dev->txpower_conf);
txpwr -= (dev->target_power + dev->target_power_delta[0]);
txpwr = min_t(s8, txpwr, max_txpwr_adj);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index d03d3c8e296c..0180b6200b17 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -46,8 +46,7 @@ EXPORT_SYMBOL_GPL(mt76x02u_mac_start);
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
- struct sk_buff *iter, *last = skb;
- u32 info, pad;
+ u32 info;
/* Buffer layout:
* | 4B | xfer len | pad | 4B |
@@ -57,28 +56,8 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
*/
info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
- put_unaligned_le32(info, skb_push(skb, sizeof(info)));
- /* Add zero pad of 4 - 7 bytes */
- pad = round_up(skb->len, 4) + 4 - skb->len;
-
- /* First packet of a A-MSDU burst keeps track of the whole burst
- * length, need to update length of it and the last packet.
- */
- skb_walk_frags(skb, iter) {
- last = iter;
- if (!iter->next) {
- skb->data_len += pad;
- skb->len += pad;
- break;
- }
- }
-
- if (skb_pad(last, pad))
- return -ENOMEM;
- __skb_put(last, pad);
-
- return 0;
+ return mt76u_skb_dma_info(skb, info);
}
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
@@ -198,7 +177,7 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work)
container_of(work, struct mt76x02_dev, pre_tbtt_work);
struct beacon_bc_data data = {};
struct sk_buff *skb;
- int i, nbeacons;
+ int nbeacons;
if (!dev->mt76.beacon_mask)
return;
@@ -208,17 +187,30 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work)
mt76x02_resync_beacon_timer(dev);
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ dev->beacon_data_count = 0;
+
ieee80211_iterate_active_interfaces(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_update_beacon_iter, dev);
+ mt76_csa_check(&dev->mt76);
+
+ if (dev->mt76.csa_complete) {
+ mt76_csa_finish(&dev->mt76);
+ goto out;
+ }
+
nbeacons = hweight8(dev->mt76.beacon_mask);
mt76x02_enqueue_buffered_bc(dev, &data, N_BCN_SLOTS - nbeacons);
- for (i = nbeacons; i < N_BCN_SLOTS; i++) {
- skb = __skb_dequeue(&data.q);
- mt76x02_mac_set_beacon(dev, i, skb);
- }
+ while ((skb = __skb_dequeue(&data.q)) != NULL)
+ mt76x02_mac_set_beacon(dev, skb);
+
+out:
+ mt76_wr(dev, MT_BCN_BYPASS_MASK,
+ 0xff00 | ~(0xff00 >> dev->beacon_data_count));
mt76x02u_restart_pre_tbtt_timer(dev);
}
@@ -244,20 +236,11 @@ static void mt76x02u_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
static void mt76x02u_beacon_enable(struct mt76x02_dev *dev, bool en)
{
- int i;
-
if (WARN_ON_ONCE(!dev->mt76.beacon_int))
return;
- if (en) {
+ if (en)
mt76x02u_start_pre_tbtt_timer(dev);
- } else {
- /* Timer is already stopped, only clean up
- * PS buffered frames if any.
- */
- for (i = 0; i < N_BCN_SLOTS; i++)
- mt76x02_mac_set_beacon(dev, i, NULL);
- }
}
void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
@@ -280,7 +263,7 @@ EXPORT_SYMBOL_GPL(mt76x02u_init_beacon_config);
void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev)
{
- if (!test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (!test_bit(MT76_REMOVED, &dev->mphy.state))
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
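
The open-coded USB DMA padding removed from mt76x02u_skb_dma_info() above moves into the shared mt76u_skb_dma_info() helper; the rule itself is unchanged: round the transfer up to 4 bytes and append a mandatory 4-byte zero trailer, giving 4 to 7 pad bytes total. Standalone check of pad = round_up(len, 4) + 4 - len:

#include <stdio.h>

#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int len;

	for (len = 8; len <= 12; len++)
		printf("len=%u pad=%u\n", len, round_up(len, 4) + 4 - len);
	/* prints 4, 7, 6, 5, 4: the pad always lands in [4, 7] */
	return 0;
}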
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index a993cd7e9948..843b86560ed4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -55,7 +55,8 @@ static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
u32 rxfce;
for (i = 0; i < 5; i++) {
- ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len, 300);
+ ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len,
+ 300, MT_EP_IN_CMD_RESP);
if (ret == -ETIMEDOUT)
continue;
if (ret)
@@ -82,18 +83,17 @@ static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
- struct mt76_usb *usb = &dev->usb;
- int ret;
u8 seq = 0;
u32 info;
+ int ret;
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
return 0;
if (wait_resp) {
- seq = ++usb->mcu.msg_seq & 0xf;
+ seq = ++dev->mcu.msg_seq & 0xf;
if (!seq)
- seq = ++usb->mcu.msg_seq & 0xf;
+ seq = ++dev->mcu.msg_seq & 0xf;
}
info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
@@ -103,7 +103,8 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
if (ret)
return ret;
- ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500);
+ ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500,
+ MT_EP_OUT_INBAND_CMD);
if (ret)
return ret;
@@ -119,7 +120,6 @@ static int
mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp)
{
- struct mt76_usb *usb = &dev->usb;
struct sk_buff *skb;
int err;
@@ -127,9 +127,9 @@ mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
if (!skb)
return -ENOMEM;
- mutex_lock(&usb->mcu.mutex);
+ mutex_lock(&dev->mcu.mutex);
err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
- mutex_unlock(&usb->mcu.mutex);
+ mutex_unlock(&dev->mcu.mutex);
return err;
}
@@ -143,9 +143,8 @@ static int
mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *data, int n)
{
- const int CMD_RANDOM_WRITE = 12;
const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
- struct mt76_usb *usb = &dev->usb;
+ const int CMD_RANDOM_WRITE = 12;
struct sk_buff *skb;
int cnt, i, ret;
@@ -164,9 +163,9 @@ mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
skb_put_le32(skb, data[i].value);
}
- mutex_lock(&usb->mcu.mutex);
+ mutex_lock(&dev->mcu.mutex);
ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
- mutex_unlock(&usb->mcu.mutex);
+ mutex_unlock(&dev->mcu.mutex);
if (ret)
return ret;
@@ -200,7 +199,7 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
skb_put_le32(skb, data[i].value);
}
- mutex_lock(&usb->mcu.mutex);
+ mutex_lock(&dev->mcu.mutex);
usb->mcu.rp = data;
usb->mcu.rp_len = n;
@@ -211,7 +210,7 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
usb->mcu.rp = NULL;
- mutex_unlock(&usb->mcu.mutex);
+ mutex_unlock(&dev->mcu.mutex);
return ret;
}
@@ -248,7 +247,8 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
data_len = MT_CMD_HDR_LEN + len + sizeof(info);
- err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000);
+ err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000,
+ MT_EP_OUT_INBAND_CMD);
if (err) {
dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 0960fc56b672..b7a120b0856d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -96,9 +96,9 @@ mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
mt76);
u32 val;
- val = MT_LED_STATUS_DURATION(0xff) |
- MT_LED_STATUS_OFF(delay_off) |
- MT_LED_STATUS_ON(delay_on);
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
mt76_wr(dev, MT_LED_S0(mdev->led_pin), val);
mt76_wr(dev, MT_LED_S1(mdev->led_pin), val);
@@ -166,7 +166,6 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
wiphy->reg_notifier = mt76x02_regd_notifier;
wiphy->iface_combinations = mt76x02_if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
@@ -182,21 +181,22 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
hw->vif_data_size = sizeof(struct mt76x02_vif);
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
dev->mt76.global_wcid.idx = 255;
dev->mt76.global_wcid.hw_key_idx = -1;
dev->slottime = 9;
if (is_mt76x2(dev)) {
- dev->mt76.sband_2g.sband.ht_cap.cap |=
+ dev->mphy.sband_2g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.ht_cap.cap |=
+ dev->mphy.sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.chainmask = 0x202;
- dev->mt76.antenna_mask = 3;
+ dev->chainmask = 0x202;
+ dev->mphy.antenna_mask = 3;
} else {
- dev->mt76.chainmask = 0x101;
- dev->mt76.antenna_mask = 1;
+ dev->chainmask = 0x101;
+ dev->mphy.antenna_mask = 1;
}
}
EXPORT_SYMBOL_GPL(mt76x02_init_device);
@@ -325,7 +325,9 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (vif->type == NL80211_IFTYPE_STATION)
idx += 8;
- if (dev->vif_mask & BIT(idx))
+	/* vif slot already taken, or idx > 7 used with a non-STA (AP/Mesh/...) vif */
+ if (dev->vif_mask & BIT(idx) ||
+ (vif->type != NL80211_IFTYPE_STATION && idx > 7))
return -EBUSY;
dev->vif_mask |= BIT(idx);
@@ -545,7 +547,7 @@ void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
- dev->coverage_class = coverage_class;
+ dev->coverage_class = max_t(s16, coverage_class, 0);
mt76x02_set_tx_ackto(dev);
mutex_unlock(&dev->mt76.mutex);
}
@@ -602,7 +604,7 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_SCANNING, &dev->mt76.state);
+ clear_bit(MT76_SCANNING, &dev->mphy.state);
if (dev->cal.gain_init_done) {
/* Restore AGC gain and resume calibration after scanning. */
dev->cal.low_gain = -1;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
index 7b2b187fbf47..caf089538c11 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
@@ -13,5 +13,3 @@ mt76x2e-y := \
mt76x2u-y := \
usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \
usb_phy.o
-
-CFLAGS_pci_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 9f91556c7f38..4a748a6f0ce2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -248,7 +248,7 @@ mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel)
void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
u8 lna;
@@ -455,7 +455,7 @@ EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
{
- enum nl80211_band band = dev->mt76.chandef.chan->band;
+ enum nl80211_band band = dev->mphy.chandef.chan->band;
u16 val, slope;
u8 bounds;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
index 4dcf6518cb0d..3755632e6494 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
@@ -53,7 +53,7 @@ mt76x2_has_ext_lna(struct mt76x02_dev *dev)
{
u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
- if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ)
return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
else
return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index 79e583eb066b..a92a479aebaa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -82,7 +82,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
{ MT_PBF_SYS_CTRL, 0x00080c00 },
{ MT_PBF_CFG, 0x1efebcff },
{ MT_FCE_PSE_CTRL, 0x00000001 },
- { MT_MAC_SYS_CTRL, 0x0000000c },
+ { MT_MAC_SYS_CTRL, 0x00000000 },
{ MT_MAX_LEN_CFG, 0x003e3f00 },
{ MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
{ MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
index 76d8cd37d4de..9635c04ce032 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -29,7 +29,7 @@ int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
.idx = channel,
.scan = scan,
.bw = bw,
- .chainmask = cpu_to_le16(dev->mt76.chainmask),
+ .chainmask = cpu_to_le16(dev->chainmask),
};
/* first set the channel without the extension channel info */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index 41680c420cda..eca95b7f64d2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -30,7 +30,7 @@ static inline bool is_mt7612(struct mt76x02_dev *dev)
static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
return ((chan->flags & IEEE80211_CHAN_RADAR) &&
chan->dfs_state != NL80211_DFS_AVAILABLE);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 33fcec9179b2..c69579e5f647 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -239,7 +239,7 @@ static int mt76x2_init_hardware(struct mt76x02_dev *dev)
if (ret)
return ret;
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
mt76x02_mac_start(dev);
ret = mt76x2_mcu_init(dev);
@@ -289,8 +289,8 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
goto fail;
mt76x02_init_debugfs(dev);
- mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
- mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_5g.sband);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index cfe8905ce73f..105e5b99b3f9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -22,7 +22,7 @@ mt76x2_start(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
MT_WATCHDOG_TIME);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -31,7 +31,7 @@ mt76x2_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt76x2_stop_hardware(dev);
}
@@ -45,9 +45,9 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt76x2_mac_stop(dev, true);
ret = mt76x2_phy_set_channel(dev, chandef);
@@ -57,13 +57,13 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
return ret;
}
@@ -86,14 +86,14 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->mt76.txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = hw->conf.power_level * 2;
/* convert to per-chain power for 2x2 devices */
- dev->mt76.txpower_conf -= 6;
+ dev->txpower_conf -= 6;
- if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
mt76x2_phy_set_txpower(dev);
- mt76x02_tx_set_txpwr_auto(dev, dev->mt76.txpower_conf);
+ mt76x02_tx_set_txpwr_auto(dev, dev->txpower_conf);
}
}
@@ -124,8 +124,8 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
mutex_lock(&dev->mt76.mutex);
- dev->mt76.chainmask = (tx_ant == 3) ? 0x202 : 0x101;
- dev->mt76.antenna_mask = tx_ant;
+ dev->chainmask = (tx_ant == 3) ? 0x202 : 0x101;
+ dev->mphy.antenna_mask = tx_ant;
mt76_set_stream_caps(&dev->mt76, true);
mt76x2_phy_set_antenna(dev);
@@ -145,6 +145,7 @@ const struct ieee80211_ops mt76x2_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76_sw_scan,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
index 23f35bf8d47b..8831337df23e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -12,7 +12,7 @@
static bool
mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u32 flag = 0;
if (!mt76x2_tssi_enabled(dev))
@@ -35,7 +35,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
static void
mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
if (dev->cal.channel_cal_done)
@@ -74,7 +74,7 @@ void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
val = mt76_rr(dev, MT_BBP(AGC, 0));
val &= ~(BIT(4) | BIT(1));
- switch (dev->mt76.antenna_mask) {
+ switch (dev->mphy.antenna_mask) {
case 1:
/* disable mac DAC control */
mt76_clear(dev, MT_BBP(IBI, 9), BIT(11));
@@ -118,7 +118,7 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_channel *chan = chandef->chan;
- bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
enum nl80211_band band = chan->band;
u8 channel;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index edbab4fa7f6e..ed2dcb05d614 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -136,8 +136,8 @@ mt76x2_get_min_rate_power(struct mt76_rate_power *r)
void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
{
- enum nl80211_chan_width width = dev->mt76.chandef.width;
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ enum nl80211_chan_width width = dev->mphy.chandef.width;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
struct mt76x2_tx_power_info txp;
int txp_0, txp_1, delta = 0;
struct mt76_rate_power t = {};
@@ -152,8 +152,8 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
mt76x2_get_rate_power(dev, &t, chan);
mt76x02_add_rate_power_offset(&t, txp.target_power + delta);
- mt76x02_limit_rate_power(&t, dev->mt76.txpower_conf);
- dev->mt76.txpower_cur = mt76x02_get_max_rate_power(&t);
+ mt76x02_limit_rate_power(&t, dev->txpower_conf);
+ dev->mphy.txpower_cur = mt76x02_get_max_rate_power(&t);
base_power = mt76x2_get_min_rate_power(&t);
delta = base_power - txp.target_power;
@@ -202,7 +202,7 @@ EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
struct mt76x2_tx_power_info txp;
struct mt76x2_tssi_comp t = {};
@@ -252,12 +252,12 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
val = 0x1836 << 16;
if (!mt76x2_has_ext_lna(dev) &&
- dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+ dev->mphy.chandef.width >= NL80211_CHAN_WIDTH_40)
val = 0x1e42 << 16;
if (mt76x2_has_ext_lna(dev) &&
- dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
- dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
+ dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ &&
+ dev->mphy.chandef.width < NL80211_CHAN_WIDTH_40)
val = 0x0f36 << 16;
val |= 0xf8;
@@ -267,7 +267,7 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
mt76_wr(dev, MT_BBP(AGC, 9),
val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
- if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+ if (dev->mphy.chandef.chan->flags & IEEE80211_CHAN_RADAR)
mt76x02_phy_dfs_adjust_agc(dev);
}
@@ -280,7 +280,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
int low_gain;
u32 val;
- dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
if (!dev->cal.avg_rssi_all)
dev->cal.avg_rssi_all = -75;
@@ -297,7 +297,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
return;
}
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
+ if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_80) {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
if (low_gain == 2)
@@ -315,11 +315,11 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
low_gain_delta = 14;
agc_37 = 0x2121262c;
- if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ)
agc_35 = 0x11111516;
else if (low_gain == 2)
agc_35 = agc_37 = 0x08080808;
- else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ else if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_80)
agc_35 = 0x10101014;
else
agc_35 = 0x11111116;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index b64ad816cc25..eafa283ca699 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -54,7 +54,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
mt76x02u_init_mcu(mdev);
- err = mt76u_init(mdev, intf);
+ err = mt76u_init(mdev, intf, false);
if (err < 0)
goto err;
@@ -86,7 +86,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf)
struct mt76x02_dev *dev = usb_get_intfdata(intf);
struct ieee80211_hw *hw = mt76_hw(dev);
- set_bit(MT76_REMOVED, &dev->mt76.state);
+ set_bit(MT76_REMOVED, &dev->mphy.state);
ieee80211_unregister_hw(hw);
mt76x2u_cleanup(dev);
mt76u_deinit(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 2910068f4e79..ffc2deba29ac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -190,6 +190,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
int mt76x2u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt76_usb *usb = &dev->mt76.usb;
int err;
INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
@@ -199,6 +200,11 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
if (err < 0)
return err;
+ usb->mcu.data = devm_kmalloc(dev->mt76.dev, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (!usb->mcu.data)
+ return -ENOMEM;
+
err = mt76u_alloc_queues(&dev->mt76);
if (err < 0)
goto fail;
@@ -207,22 +213,18 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto fail;
+	/* check hw scatter-gather support in order to enable A-MSDU */
+ hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1;
err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));
if (err)
goto fail;
- /* check hw sg support in order to enable AMSDU */
- if (dev->mt76.usb.sg_en)
- hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
- else
- hw->max_tx_fragments = 1;
-
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
mt76x02_init_debugfs(dev);
- mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
- mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_5g.sband);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index 59cbe826188a..eaa622833f85 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -98,7 +98,7 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev)
bool stopped = false;
u32 rts_cfg;
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return -EIO;
rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 9e97204841f5..bab4e6e1904e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -17,7 +17,7 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -26,7 +26,7 @@ static void mt76x2u_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt76u_stop_tx(&dev->mt76);
mt76x2u_stop_hw(dev);
}
@@ -41,9 +41,9 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76x02_pre_tbtt_enable(dev, false);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt76x2_mac_stop(dev, false);
@@ -52,11 +52,11 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76x02_mac_cc_reset(dev);
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
mutex_unlock(&dev->mt76.mutex);
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
return err;
}
@@ -78,12 +78,12 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->mt76.txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = hw->conf.power_level * 2;
/* convert to per-chain power for 2x2 devices */
- dev->mt76.txpower_conf -= 6;
+ dev->txpower_conf -= 6;
- if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
mt76x2_phy_set_txpower(dev);
}
@@ -105,6 +105,7 @@ const struct ieee80211_ops mt76x2u_ops = {
.add_interface = mt76x02_add_interface,
.remove_interface = mt76x02_remove_interface,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.ampdu_action = mt76x02_ampdu_action,
.config = mt76x2u_config,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
index b1381f9df992..a04a98f5ce1e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -10,7 +10,7 @@
static void
mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
if (dev->cal.channel_cal_done)
@@ -82,7 +82,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
};
- bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
struct ieee80211_channel *chan = chandef->chan;
u8 channel = chan->hw_value, bw, bw_index;
int ch_group_index, freq, freq1, ret;
@@ -185,7 +185,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
struct ieee80211_channel *chan;
u32 flag = 0;
- chan = dev->mt76.chandef.chan;
+ chan = dev->mphy.chandef.chan;
if (chan->band == NL80211_BAND_5GHZ)
flag |= BIT(0);
if (mt76x02_ext_pa_enabled(dev, chan->band))
diff --git a/drivers/net/wireless/mediatek/mt76/trace.c b/drivers/net/wireless/mediatek/mt76/trace.c
index ed3df3c8b4b3..f199fcd2a63d 100644
--- a/drivers/net/wireless/mediatek/mt76/trace.c
+++ b/drivers/net/wireless/mediatek/mt76/trace.c
@@ -9,4 +9,7 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
+EXPORT_TRACEPOINT_SYMBOL_GPL(mac_txdone);
+EXPORT_TRACEPOINT_SYMBOL_GPL(dev_irq);
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h
index 0b3e635da868..c3d0ef8e2890 100644
--- a/drivers/net/wireless/mediatek/mt76/trace.h
+++ b/drivers/net/wireless/mediatek/mt76/trace.h
@@ -14,7 +14,7 @@
#define MAXNAME 32
#define DEV_ENTRY __array(char, wiphy_name, 32)
-#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+#define DEVICE_ASSIGN strlcpy(__entry->wiphy_name, \
wiphy_name(dev->hw->wiphy), MAXNAME)
#define DEV_PR_FMT "%s"
#define DEV_PR_ARG __entry->wiphy_name
@@ -24,6 +24,11 @@
#define REG_PR_FMT " %04x=%08x"
#define REG_PR_ARG __entry->reg, __entry->val
+#define TXID_ENTRY __field(u8, wcid) __field(u8, pktid)
+#define TXID_ASSIGN __entry->wcid = wcid; __entry->pktid = pktid
+#define TXID_PR_FMT " [%d:%d]"
+#define TXID_PR_ARG __entry->wcid, __entry->pktid
+
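
Given DEV_PR_FMT "%s" and TXID_PR_FMT " [%d:%d]", a mac_txdone event renders along these lines (example values, illustrative only):

	/* e.g.:  phy0 [5:3]   -- wiphy_name [wcid:pktid] */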
DECLARE_EVENT_CLASS(dev_reg_evt,
TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
TP_ARGS(dev, reg, val),
@@ -32,7 +37,7 @@ DECLARE_EVENT_CLASS(dev_reg_evt,
REG_ENTRY
),
TP_fast_assign(
- DEV_ASSIGN;
+ DEVICE_ASSIGN;
REG_ASSIGN;
),
TP_printk(
@@ -51,6 +56,51 @@ DEFINE_EVENT(dev_reg_evt, reg_wr,
TP_ARGS(dev, reg, val)
);
+TRACE_EVENT(dev_irq,
+ TP_PROTO(struct mt76_dev *dev, u32 val, u32 mask),
+
+ TP_ARGS(dev, val, mask),
+
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, val)
+ __field(u32, mask)
+ ),
+
+ TP_fast_assign(
+ DEVICE_ASSIGN;
+ __entry->val = val;
+ __entry->mask = mask;
+ ),
+
+ TP_printk(
+ DEV_PR_FMT " %08x & %08x",
+ DEV_PR_ARG, __entry->val, __entry->mask
+ )
+);
+
+DECLARE_EVENT_CLASS(dev_txid_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TXID_ENTRY
+ ),
+ TP_fast_assign(
+ DEVICE_ASSIGN;
+ TXID_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT TXID_PR_FMT,
+ DEV_PR_ARG, TXID_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_txid_evt, mac_txdone,
+ TP_PROTO(struct mt76_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 7ee91d946882..eff522dbda34 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -109,13 +109,17 @@ void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
__releases(&dev->status_list.unlock)
{
+ struct ieee80211_hw *hw;
struct sk_buff *skb;
spin_unlock_bh(&dev->status_list.lock);
__release(&dev->status_list.unlock);
- while ((skb = __skb_dequeue(list)) != NULL)
- ieee80211_tx_status(dev->hw, skb);
+ while ((skb = __skb_dequeue(list)) != NULL) {
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_tx_status(hw, skb);
+ }
+
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
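
mt76_tx_status_get_hw() itself is not part of this diff; a plausible sketch, assuming it routes on the MT_TX_HW_QUEUE_EXT_PHY flag that mt76_tx()/mt76_txq_dequeue() set further down:

	/* sketch only -- the real helper lives in mt76.h */
	static inline struct ieee80211_hw *
	mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;

		info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;

		return ext_phy && dev->phy2 ? dev->phy2->hw : dev->phy.hw;
	}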
@@ -231,10 +235,12 @@ EXPORT_SYMBOL_GPL(mt76_tx_status_check);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
+ struct ieee80211_hw *hw;
struct sk_buff_head list;
if (!skb->prev) {
- ieee80211_free_txskb(dev->hw, skb);
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_free_txskb(hw, skb);
return;
}
@@ -245,13 +251,15 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
void
-mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
{
+ struct mt76_dev *dev = phy->dev;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct mt76_queue *q;
int qid = skb_get_queue_mapping(skb);
+ bool ext_phy = phy != &dev->phy;
if (WARN_ON(qid >= MT_TXQ_PSD)) {
qid = MT_TXQ_BE;
@@ -275,6 +283,9 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
mt76_check_agg_ssn(mtxq, skb);
}
+ if (ext_phy)
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
q = dev->q_tx[qid].q;
spin_lock_bh(&q->lock);
@@ -282,7 +293,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8 && !q->stopped) {
- ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+ ieee80211_stop_queue(phy->hw, skb_get_queue_mapping(skb));
q->stopped = true;
}
@@ -291,9 +302,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_tx);
static struct sk_buff *
-mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
+mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq, bool ps)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ struct ieee80211_tx_info *info;
+ bool ext_phy = phy != &phy->dev->phy;
struct sk_buff *skb;
skb = skb_dequeue(&mtxq->retry_q);
@@ -306,10 +319,14 @@ mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
return skb;
}
- skb = ieee80211_tx_dequeue(dev->hw, txq);
+ skb = ieee80211_tx_dequeue(phy->hw, txq);
if (!skb)
return NULL;
+ info = IEEE80211_SKB_CB(skb);
+ if (ext_phy)
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
return skb;
}
@@ -335,7 +352,8 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
bool more_data)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
struct sk_buff *last_skb = NULL;
struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
int i;
@@ -350,7 +368,7 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
continue;
do {
- skb = mt76_txq_dequeue(dev, mtxq, true);
+ skb = mt76_txq_dequeue(phy, mtxq, true);
if (!skb)
break;
@@ -377,9 +395,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
-mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
+mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
struct mt76_txq *mtxq)
{
+ struct mt76_dev *dev = phy->dev;
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
struct mt76_wcid *wcid = mtxq->wcid;
@@ -395,7 +414,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
return 0;
- skb = mt76_txq_dequeue(dev, mtxq, false);
+ skb = mt76_txq_dequeue(phy, mtxq, false);
if (!skb)
return 0;
@@ -423,10 +442,10 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
if (probe)
break;
- if (test_bit(MT76_RESET, &dev->state))
+ if (test_bit(MT76_RESET, &phy->state))
return -EBUSY;
- skb = mt76_txq_dequeue(dev, mtxq, false);
+ skb = mt76_txq_dequeue(phy, mtxq, false);
if (!skb)
break;
@@ -464,8 +483,9 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
}
static int
-mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
+mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
+ struct mt76_dev *dev = phy->dev;
struct mt76_sw_queue *sq = &dev->q_tx[qid];
struct mt76_queue *hwq = sq->q;
struct ieee80211_txq *txq;
@@ -478,12 +498,12 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
if (sq->swq_queued >= 4)
break;
- if (test_bit(MT76_RESET, &dev->state)) {
+ if (test_bit(MT76_RESET, &phy->state)) {
ret = -EBUSY;
break;
}
- txq = ieee80211_next_txq(dev->hw, qid);
+ txq = ieee80211_next_txq(phy->hw, qid);
if (!txq)
break;
@@ -505,8 +525,8 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
spin_lock_bh(&hwq->lock);
}
- ret += mt76_txq_send_burst(dev, sq, mtxq);
- ieee80211_return_txq(dev->hw, txq,
+ ret += mt76_txq_send_burst(phy, sq, mtxq);
+ ieee80211_return_txq(phy->hw, txq,
!skb_queue_empty(&mtxq->retry_q));
}
spin_unlock_bh(&hwq->lock);
@@ -514,8 +534,9 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
return ret;
}
-void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
+void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
+ struct mt76_dev *dev = phy->dev;
struct mt76_sw_queue *sq = &dev->q_tx[qid];
int len;
@@ -528,21 +549,21 @@ void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
rcu_read_lock();
do {
- ieee80211_txq_schedule_start(dev->hw, qid);
- len = mt76_txq_schedule_list(dev, qid);
- ieee80211_txq_schedule_end(dev->hw, qid);
+ ieee80211_txq_schedule_start(phy->hw, qid);
+ len = mt76_txq_schedule_list(phy, qid);
+ ieee80211_txq_schedule_end(phy->hw, qid);
} while (len > 0);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
-void mt76_txq_schedule_all(struct mt76_dev *dev)
+void mt76_txq_schedule_all(struct mt76_phy *phy)
{
int i;
for (i = 0; i <= MT_TXQ_BK; i++)
- mt76_txq_schedule(dev, i);
+ mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
@@ -550,7 +571,9 @@ void mt76_tx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
- mt76_txq_schedule_all(dev);
+ mt76_txq_schedule_all(&dev->phy);
+ if (dev->phy2)
+ mt76_txq_schedule_all(dev->phy2);
}
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -578,9 +601,10 @@ EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
- if (!test_bit(MT76_STATE_RUNNING, &dev->state))
+ if (!test_bit(MT76_STATE_RUNNING, &phy->state))
return;
tasklet_schedule(&dev->tx_tasklet);
@@ -589,6 +613,7 @@ EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
+ struct ieee80211_hw *hw;
struct mt76_txq *mtxq;
struct sk_buff *skb;
@@ -597,8 +622,10 @@ void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
mtxq = (struct mt76_txq *)txq->drv_priv;
- while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
- ieee80211_free_txskb(dev->hw, skb);
+ while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) {
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_free_txskb(hw, skb);
+ }
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index d6d47081e281..36ba81d63f12 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -29,13 +29,13 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
: usb_sndctrlpipe(udev, 0);
for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
return -EIO;
ret = usb_control_msg(udev, pipe, req, req_type, val,
offset, buf, len, MT_VEND_REQ_TOUT_MS);
if (ret == -ENODEV)
- set_bit(MT76_REMOVED, &dev->state);
+ set_bit(MT76_REMOVED, &dev->phy.state);
if (ret >= 0 || ret == -ENODEV)
return ret;
usleep_range(5000, 10000);
@@ -62,12 +62,25 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
-static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
{
struct mt76_usb *usb = &dev->usb;
u32 data = ~0;
- u16 offset;
int ret;
+
+ ret = __mt76u_vendor_request(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ addr >> 16, addr, &usb->reg_val,
+ sizeof(__le32));
+ if (ret == sizeof(__le32))
+ data = le32_to_cpu(usb->reg_val);
+ trace_usb_reg_rr(dev, addr, data);
+
+ return data;
+}
+
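
For the ext accessors the full 32-bit register address no longer fits in the 16-bit wIndex of the control request, so the upper half rides in wValue while the u16 parameters of __mt76u_vendor_request truncate the low half implicitly. Worked example (address value hypothetical):

	u32 addr   = 0x820f4000;	/* example register address */
	u16 wvalue = addr >> 16;	/* 0x820f */
	u16 windex = addr & 0xffff;	/* 0x4000, the implicit truncation above */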
+static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
u8 req;
switch (addr & MT_VEND_TYPE_MASK) {
@@ -81,16 +94,8 @@ static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
req = MT_VEND_MULTI_READ;
break;
}
- offset = addr & ~MT_VEND_TYPE_MASK;
- ret = __mt76u_vendor_request(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR,
- 0, offset, &usb->reg_val, sizeof(__le32));
- if (ret == sizeof(__le32))
- data = le32_to_cpu(usb->reg_val);
- trace_usb_reg_rr(dev, addr, data);
-
- return data;
+ return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
}
static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
@@ -104,10 +109,32 @@ static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
return ret;
}
-static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
+ u32 addr, u32 val)
{
struct mt76_usb *usb = &dev->usb;
- u16 offset;
+
+ usb->reg_val = cpu_to_le32(val);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ addr >> 16, addr, &usb->reg_val,
+ sizeof(__le32));
+ trace_usb_reg_wr(dev, addr, val);
+}
+
+static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
u8 req;
switch (addr & MT_VEND_TYPE_MASK) {
@@ -118,13 +145,7 @@ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
req = MT_VEND_MULTI_WRITE;
break;
}
- offset = addr & ~MT_VEND_TYPE_MASK;
-
- usb->reg_val = cpu_to_le32(val);
- __mt76u_vendor_request(dev, req,
- USB_DIR_OUT | USB_TYPE_VENDOR, 0,
- offset, &usb->reg_val, sizeof(__le32));
- trace_usb_reg_wr(dev, addr, val);
+ ___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
}
static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
@@ -134,6 +155,13 @@ static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
+static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
u32 mask, u32 val)
{
@@ -145,22 +173,94 @@ static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
return val;
}
+static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
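
The read-modify-write helpers keep every bit outside the mask untouched; a quick worked example (values arbitrary):

	u32 old  = 0xdeadbeef;
	u32 mask = 0x00ff0000;
	u32 val  = 0x00120000;
	u32 res  = val | (old & ~mask);	/* 0xde12beef */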
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
const void *data, int len)
{
struct mt76_usb *usb = &dev->usb;
- const u32 *val = data;
- int i, ret;
+ const u8 *val = data;
+ int ret;
+ int current_batch_size;
+ int i = 0;
+
+	/* Ensure that a multiple of 4 bytes is always copied,
+	 * otherwise beacons can be corrupted.
+ * See: "mt76: round up length on mt76_wr_copy"
+ * Commit 850e8f6fbd5d0003b0
+ */
+ len = round_up(len, 4);
mutex_lock(&usb->usb_ctrl_mtx);
- for (i = 0; i < DIV_ROUND_UP(len, 4); i++) {
- put_unaligned(val[i], (u32 *)usb->data);
+ while (i < len) {
+ current_batch_size = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, current_batch_size);
ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
USB_DIR_OUT | USB_TYPE_VENDOR,
- 0, offset + i * 4, usb->data,
- sizeof(u32));
+ 0, offset + i, usb->data,
+ current_batch_size);
if (ret < 0)
break;
+
+ i += current_batch_size;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
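
Worked example of the chunking above (buffer size assumed): with usb->data_len == 64 and len == 150, the length is first rounded up to 152 and then staged through the usb->data bounce buffer as transfers of 64, 64 and 24 bytes.

	/* assuming usb->data_len == 64:
	 *   len = round_up(150, 4) = 152
	 *   batches: 64 + 64 + 24 bytes
	 */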
+static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int ret, i = 0, batch_len;
+ const u8 *val = data;
+
+ len = round_up(len, 4);
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, batch_len);
+ ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+static void
+mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
+ void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int i = 0, batch_len, ret;
+ u8 *val = data;
+
+ len = round_up(len, 4);
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ memcpy(val + i, usb->data, batch_len);
+ i += batch_len;
}
mutex_unlock(&usb->usb_ctrl_mtx);
}
@@ -200,7 +300,7 @@ static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *data, int n)
{
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
else
return mt76u_req_wr_rp(dev, base, data, n);
@@ -227,7 +327,7 @@ static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *data, int n)
{
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
else
return mt76u_req_rd_rp(dev, base, data, n);
@@ -306,11 +406,12 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
}
static int
-mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
+mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
+ struct urb *urb, int nsgs, gfp_t gfp)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
- if (dev->usb.sg_en)
+ if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
urb->transfer_buffer_length = q->buf_size;
@@ -334,23 +435,25 @@ mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
usb_init_urb(e->urb);
- if (dev->usb.sg_en)
+ if (dev->usb.sg_en && sg_max_size > 0)
e->urb->sg = (struct scatterlist *)(e->urb + 1);
return 0;
}
static int
-mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
+mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e)
{
- int err;
+ enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
+ int err, sg_size;
- err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
+ sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
+ err = mt76u_urb_alloc(dev, e, sg_size);
if (err)
return err;
- return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
- GFP_KERNEL);
+ return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}
static void mt76u_urb_free(struct urb *urb)
@@ -386,10 +489,9 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
urb->context = context;
}
-static inline struct urb *
-mt76u_get_next_rx_entry(struct mt76_dev *dev)
+static struct urb *
+mt76u_get_next_rx_entry(struct mt76_queue *q)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct urb *urb = NULL;
unsigned long flags;
@@ -404,14 +506,17 @@ mt76u_get_next_rx_entry(struct mt76_dev *dev)
return urb;
}
-static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
+static int
+mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
+ u32 data_len)
{
u16 dma_len, min_len;
dma_len = get_unaligned_le16(data);
- min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
- MT_FCE_INFO_LEN;
+ if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
+ return dma_len;
+ min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
if (data_len < min_len || !dma_len ||
dma_len + MT_DMA_HDR_LEN > data_len ||
(dma_len & 0x3))
@@ -420,11 +525,14 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
}
static struct sk_buff *
-mt76u_build_rx_skb(void *data, int len, int buf_size)
+mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
+ int len, int buf_size)
{
+ int head_room, drv_flags = dev->drv->drv_flags;
struct sk_buff *skb;
- if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
+ head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
+ if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
struct page *page;
/* slow path, not enough space for data and
@@ -434,8 +542,8 @@ mt76u_build_rx_skb(void *data, int len, int buf_size)
if (!skb)
return NULL;
- skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
- data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
+ skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
+ data += head_room + MT_SKB_HEAD_LEN;
page = virt_to_head_page(data);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, data - page_address(page),
@@ -449,30 +557,31 @@ mt76u_build_rx_skb(void *data, int len, int buf_size)
if (!skb)
return NULL;
- skb_reserve(skb, MT_DMA_HDR_LEN);
+ skb_reserve(skb, head_room);
__skb_put(skb, len);
return skb;
}
static int
-mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
+ int buf_size)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
- int len, nsgs = 1;
+ int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
struct sk_buff *skb;
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
return 0;
- len = mt76u_get_rx_entry_len(data, urb->actual_length);
+ len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
if (len < 0)
return 0;
- data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
- skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
+ head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
+ data_len = min_t(int, len, data_len - head_room);
+ skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
if (!skb)
return 0;
@@ -481,8 +590,8 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
data_len = min_t(int, len, urb->sg[nsgs].length);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
sg_page(&urb->sg[nsgs]),
- urb->sg[nsgs].offset,
- data_len, q->buf_size);
+ urb->sg[nsgs].offset, data_len,
+ buf_size);
len -= data_len;
nsgs++;
}
@@ -493,8 +602,8 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
static void mt76u_complete_rx(struct urb *urb)
{
- struct mt76_dev *dev = urb->context;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
+ struct mt76_queue *q = urb->context;
unsigned long flags;
trace_rx_urb(dev, urb);
@@ -524,50 +633,62 @@ out:
}
static int
-mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
+mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ struct urb *urb)
{
- mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
- mt76u_complete_rx, dev);
+ int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;
+
+ mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
+ mt76u_complete_rx, &dev->q_rx[qid]);
trace_submit_urb(dev, urb);
return usb_submit_urb(urb, GFP_ATOMIC);
}
-static void mt76u_rx_tasklet(unsigned long data)
+static void
+mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct mt76_dev *dev = (struct mt76_dev *)data;
+ int qid = q - &dev->q_rx[MT_RXQ_MAIN];
struct urb *urb;
int err, count;
- rcu_read_lock();
-
while (true) {
- urb = mt76u_get_next_rx_entry(dev);
+ urb = mt76u_get_next_rx_entry(q);
if (!urb)
break;
- count = mt76u_process_rx_entry(dev, urb);
+ count = mt76u_process_rx_entry(dev, urb, q->buf_size);
if (count > 0) {
- err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
+ err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
if (err < 0)
break;
}
- mt76u_submit_rx_buf(dev, urb);
+ mt76u_submit_rx_buf(dev, qid, urb);
}
- mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+ if (qid == MT_RXQ_MAIN)
+ mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+}
+static void mt76u_rx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+
+ rcu_read_lock();
+ mt76u_process_rx_queue(dev, q);
rcu_read_unlock();
}
-static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+static int
+mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_queue *q = &dev->q_rx[qid];
unsigned long flags;
int i, err = 0;
spin_lock_irqsave(&q->lock, flags);
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
+ err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
if (err < 0)
break;
}
@@ -578,16 +699,12 @@ static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
return err;
}
-static int mt76u_alloc_rx(struct mt76_dev *dev)
+static int
+mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
- struct mt76_usb *usb = &dev->usb;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_queue *q = &dev->q_rx[qid];
int i, err;
- usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
- if (!usb->mcu.data)
- return -ENOMEM;
-
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -599,17 +716,23 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
q->buf_size = PAGE_SIZE;
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
+ err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
if (err < 0)
return err;
}
- return mt76u_submit_rx_buffers(dev);
+ return mt76u_submit_rx_buffers(dev, qid);
}
-static void mt76u_free_rx(struct mt76_dev *dev)
+int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
+{
+ return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
+
+static void
+mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct page *page;
int i;
@@ -624,13 +747,33 @@ static void mt76u_free_rx(struct mt76_dev *dev)
memset(&q->rx_page, 0, sizeof(q->rx_page));
}
-void mt76u_stop_rx(struct mt76_dev *dev)
+static void mt76u_free_rx(struct mt76_dev *dev)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_queue *q;
int i;
- for (i = 0; i < q->ndesc; i++)
- usb_poison_urb(q->entry[i].urb);
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ q = &dev->q_rx[i];
+ if (!q->ndesc)
+ continue;
+
+ mt76u_free_rx_queue(dev, q);
+ }
+}
+
+void mt76u_stop_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ q = &dev->q_rx[i];
+ if (!q->ndesc)
+ continue;
+
+ for (j = 0; j < q->ndesc; j++)
+ usb_poison_urb(q->entry[j].urb);
+ }
tasklet_kill(&dev->usb.rx_tasklet);
}
@@ -638,13 +781,24 @@ EXPORT_SYMBOL_GPL(mt76u_stop_rx);
int mt76u_resume_rx(struct mt76_dev *dev)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int i;
+ struct mt76_queue *q;
+ int i, j, err;
- for (i = 0; i < q->ndesc; i++)
- usb_unpoison_urb(q->entry[i].urb);
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ q = &dev->q_rx[i];
- return mt76u_submit_rx_buffers(dev);
+ if (!q->ndesc)
+ continue;
+
+ for (j = 0; j < q->ndesc; j++)
+ usb_unpoison_urb(q->entry[j].urb);
+
+ err = mt76u_submit_rx_buffers(dev, i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);
@@ -694,10 +848,11 @@ static void mt76u_tx_tasklet(unsigned long data)
spin_unlock_bh(&q->lock);
- mt76_txq_schedule(dev, i);
+ mt76_txq_schedule(&dev->phy, i);
- if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
- queue_work(dev->usb.stat_wq, &dev->usb.stat_work);
+ if (dev->drv->tx_status_data &&
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+ queue_work(dev->usb.wq, &dev->usb.stat_work);
if (wake)
ieee80211_wake_queue(dev->hw, i);
}
@@ -714,7 +869,7 @@ static void mt76u_tx_status_data(struct work_struct *work)
dev = container_of(usb, struct mt76_dev, usb);
while (true) {
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
break;
if (!dev->drv->tx_status_data(dev, &update))
@@ -722,10 +877,10 @@ static void mt76u_tx_status_data(struct work_struct *work)
count++;
}
- if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
- queue_work(usb->stat_wq, &usb->stat_work);
+ if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
+ queue_work(usb->wq, &usb->stat_work);
else
- clear_bit(MT76_READING_STATS, &dev->state);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
}
static void mt76u_complete_tx(struct urb *urb)
@@ -759,6 +914,35 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
return urb->num_sgs;
}
+int mt76u_skb_dma_info(struct sk_buff *skb, u32 info)
+{
+ struct sk_buff *iter, *last = skb;
+ u32 pad;
+
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+	/* Add a zero pad of 4 to 7 bytes */
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+
+	/* The first packet of an A-MSDU burst keeps track of the whole burst
+	 * length, so both its length and the last packet's need updating.
+ */
+ skb_walk_frags(skb, iter) {
+ last = iter;
+ if (!iter->next) {
+ skb->data_len += pad;
+ skb->len += pad;
+ break;
+ }
+ }
+
+ if (skb_pad(last, pad))
+ return -ENOMEM;
+ __skb_put(last, pad);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
+
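
The pad computed above always lands in the 4-7 byte range: alignment to a 4-byte boundary plus what appears to be a mandatory trailing zero word. Worked values (illustrative):

	/* pad = round_up(len, 4) + 4 - len:
	 *   len = 60 -> round_up = 60, pad = 4
	 *   len = 61 -> round_up = 64, pad = 7
	 *   len = 63 -> round_up = 64, pad = 5
	 */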
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
@@ -806,7 +990,7 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
if (err == -ENODEV)
- set_bit(MT76_REMOVED, &dev->state);
+ set_bit(MT76_REMOVED, &dev->phy.state);
else
dev_err(dev->dev, "tx urb submit failed:%d\n",
err);
@@ -816,6 +1000,14 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
}
}
+static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
+{
+ if (mt76_chip(dev) == 0x7663)
+ return ac ^ 0x3;
+
+ return mt76_ac_to_hwq(ac);
+}
+
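
For chip 0x7663 the mapping is just the two-bit complement; with mac80211's AC numbering (0 = VO, 1 = VI, 2 = BE, 3 = BK) that reverses the queue order, while every other chip falls back to mt76_ac_to_hwq():

	/* ac ^ 0x3:  VO(0) -> 3,  VI(1) -> 2,  BE(2) -> 1,  BK(3) -> 0 */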
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
struct mt76_queue *q;
@@ -834,7 +1026,7 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
return -ENOMEM;
spin_lock_init(&q->lock);
- q->hw_idx = mt76_ac_to_hwq(i);
+ q->hw_idx = mt76u_ac_to_hwq(dev, i);
dev->q_tx[i].q = q;
q->entry = devm_kcalloc(dev->dev,
@@ -872,7 +1064,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
struct mt76_queue *q;
int i, j, ret;
- ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev),
+ ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
HZ / 5);
if (!ret) {
dev_err(dev->dev, "timed out waiting for pending tx\n");
@@ -905,7 +1097,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
}
cancel_work_sync(&dev->usb.stat_work);
- clear_bit(MT76_READING_STATS, &dev->state);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
mt76_tx_status_check(dev, NULL, true);
}
@@ -925,7 +1117,7 @@ int mt76u_alloc_queues(struct mt76_dev *dev)
{
int err;
- err = mt76u_alloc_rx(dev);
+ err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
if (err < 0)
return err;
@@ -938,14 +1130,20 @@ static const struct mt76_queue_ops usb_queue_ops = {
.kick = mt76u_tx_kick,
};
+void mt76u_deinit(struct mt76_dev *dev)
+{
+ if (dev->usb.wq) {
+ destroy_workqueue(dev->usb.wq);
+ dev->usb.wq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76u_deinit);
+
int mt76u_init(struct mt76_dev *dev,
- struct usb_interface *intf)
+ struct usb_interface *intf, bool ext)
{
- static const struct mt76_bus_ops mt76u_ops = {
- .rr = mt76u_rr,
- .wr = mt76u_wr,
- .rmw = mt76u_rmw,
- .write_copy = mt76u_copy,
+ static struct mt76_bus_ops mt76u_ops = {
+ .read_copy = mt76u_read_copy_ext,
.wr_rp = mt76u_wr_rp,
.rd_rp = mt76u_rd_rp,
.type = MT76_BUS_USB,
@@ -953,16 +1151,28 @@ int mt76u_init(struct mt76_dev *dev,
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
+ mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
+ mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
+ mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
+ mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
+
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
- skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
- usb->stat_wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
- if (!usb->stat_wq)
+ usb->wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
+ if (!usb->wq)
return -ENOMEM;
- mutex_init(&usb->mcu.mutex);
+ usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
+ if (usb->data_len < 32)
+ usb->data_len = 32;
+
+ usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
+ if (!usb->data) {
+ mt76u_deinit(dev);
+ return -ENOMEM;
+ }
mutex_init(&usb->usb_ctrl_mtx);
dev->bus = &mt76u_ops;
@@ -976,14 +1186,5 @@ int mt76u_init(struct mt76_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76u_init);
-void mt76u_deinit(struct mt76_dev *dev)
-{
- if (dev->usb.stat_wq) {
- destroy_workqueue(dev->usb.stat_wq);
- dev->usb.stat_wq = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(mt76u_deinit);
-
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index 23d1e1da78b2..8c60c450125a 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -64,7 +64,7 @@ int mt76_wcid_alloc(unsigned long *mask, int size)
}
EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
-int mt76_get_min_avg_rssi(struct mt76_dev *dev)
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy)
{
struct mt76_wcid *wcid;
int i, j, min_rssi = 0;
@@ -75,14 +75,18 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev)
for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
unsigned long mask = dev->wcid_mask[i];
+ unsigned long phy_mask = dev->wcid_phy_mask[i];
if (!mask)
continue;
- for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+ for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1, phy_mask >>= 1) {
if (!(mask & 1))
continue;
+ if (!!(phy_mask & 1) != ext_phy)
+ continue;
+
wcid = rcu_dereference(dev->wcid[j]);
if (!wcid)
continue;
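The RSSI averaging above now walks two parallel bitmaps in lockstep: wcid_mask marks which WCID slots are allocated, and the matching wcid_phy_mask bit marks which phy each slot belongs to. A standalone sketch of that walk:

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

static void walk_wcids(const unsigned long *mask,
		       const unsigned long *phy_mask,
		       int nwords, bool ext_phy)
{
	for (int i = 0; i < nwords; i++) {
		unsigned long m = mask[i];
		unsigned long p = phy_mask[i];

		for (int j = i * BITS_PER_LONG; m; j++, m >>= 1, p >>= 1) {
			if (!(m & 1))
				continue;	/* slot unused */
			if (!!(p & 1) != ext_phy)
				continue;	/* belongs to the other phy */
			printf("wcid %d on %s phy\n",
			       j, ext_phy ? "ext" : "main");
		}
	}
}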
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
index fe3479c8e561..48a71e7479e5 100644
--- a/drivers/net/wireless/mediatek/mt76/util.h
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -16,8 +16,20 @@
int mt76_wcid_alloc(unsigned long *mask, int size);
+static inline bool
+mt76_wcid_mask_test(unsigned long *mask, int idx)
+{
+ return mask[idx / BITS_PER_LONG] & BIT(idx % BITS_PER_LONG);
+}
+
+static inline void
+mt76_wcid_mask_set(unsigned long *mask, int idx)
+{
+ mask[idx / BITS_PER_LONG] |= BIT(idx % BITS_PER_LONG);
+}
+
static inline void
-mt76_wcid_free(unsigned long *mask, int idx)
+mt76_wcid_mask_clear(unsigned long *mask, int idx)
{
mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
}
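Usage sketch for the renamed mask helpers above (a self-contained copy with BIT() spelled out; on a 64-bit build index 70 lands in word 1, bit 6):

#include <assert.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BIT(n) (1UL << (n))

static void mask_set(unsigned long *mask, int idx)
{ mask[idx / BITS_PER_LONG] |= BIT(idx % BITS_PER_LONG); }

static int mask_test(const unsigned long *mask, int idx)
{ return !!(mask[idx / BITS_PER_LONG] & BIT(idx % BITS_PER_LONG)); }

static void mask_clear(unsigned long *mask, int idx)
{ mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG); }

int main(void)
{
	unsigned long mask[2] = { 0, 0 };

	mask_set(mask, 70);
	assert(mask_test(mask, 70));
	mask_clear(mask, 70);
	assert(!mask_test(mask, 70));
	return 0;
}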
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 8849faa5bc10..73d5014a4234 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -248,7 +248,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
goto error_del_vif;
}
- if (mac->bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE)) {
ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
if (ret) {
unregister_netdevice(vif->netdev);
@@ -739,7 +739,6 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_supported_band *sband;
const struct cfg80211_chan_def *chandef = &wdev->chandef;
struct ieee80211_channel *chan;
- struct qtnf_chan_stats stats;
int ret;
sband = wiphy->bands[NL80211_BAND_2GHZ];
@@ -755,49 +754,16 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
return -ENOENT;
chan = &sband->channels[idx];
- memset(&stats, 0, sizeof(stats));
-
survey->channel = chan;
survey->filled = 0x0;
- if (chandef->chan) {
- if (chan->hw_value == chandef->chan->hw_value)
- survey->filled = SURVEY_INFO_IN_USE;
- }
+ if (chan == chandef->chan)
+ survey->filled = SURVEY_INFO_IN_USE;
- ret = qtnf_cmd_get_chan_stats(mac, chan->hw_value, &stats);
- switch (ret) {
- case 0:
- if (unlikely(stats.chan_num != chan->hw_value)) {
- pr_err("received stats for channel %d instead of %d\n",
- stats.chan_num, chan->hw_value);
- ret = -EINVAL;
- break;
- }
-
- survey->filled |= SURVEY_INFO_TIME |
- SURVEY_INFO_TIME_SCAN |
- SURVEY_INFO_TIME_BUSY |
- SURVEY_INFO_TIME_RX |
- SURVEY_INFO_TIME_TX |
- SURVEY_INFO_NOISE_DBM;
-
- survey->time_scan = stats.cca_try;
- survey->time = stats.cca_try;
- survey->time_tx = stats.cca_tx;
- survey->time_rx = stats.cca_rx;
- survey->time_busy = stats.cca_busy;
- survey->noise = stats.chan_noise;
- break;
- case -ENOENT:
- pr_debug("no stats for channel %u\n", chan->hw_value);
- ret = 0;
- break;
- default:
+ ret = qtnf_cmd_get_chan_stats(mac, chan->center_freq, survey);
+ if (ret)
pr_debug("failed to get chan(%d) stats from card\n",
chan->hw_value);
- break;
- }
return ret;
}
@@ -1080,10 +1046,10 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
struct wiphy *wiphy;
if (qtnf_dfs_offload_get() &&
- (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
+ qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_DFS_OFFLOAD))
qtn_cfg80211_ops.start_radar_detection = NULL;
- if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
+ if (!qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_PWR_MGMT))
qtn_cfg80211_ops.set_power_mgmt = NULL;
wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
@@ -1142,7 +1108,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->coverage_class = macinfo->coverage_class;
wiphy->max_scan_ssids =
- (hw_info->max_scan_ssids) ? hw_info->max_scan_ssids : 1;
+ (macinfo->max_scan_ssids) ? macinfo->max_scan_ssids : 1;
wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
wiphy->mgmt_stypes = qtnf_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
@@ -1166,10 +1132,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (qtnf_dfs_offload_get() &&
- (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
+ qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_DFS_OFFLOAD))
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
- if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_DWELL)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SCAN_DWELL))
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
@@ -1185,16 +1151,16 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
ether_addr_copy(wiphy->perm_addr, mac->macaddr);
- if (hw_info->hw_capab & QLINK_HW_CAPAB_STA_INACT_TIMEOUT)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_STA_INACT_TIMEOUT))
wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
- if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR))
wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
- if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN))
+ if (!qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_OBSS_SCAN))
wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN;
- if (hw_info->hw_capab & QLINK_HW_CAPAB_SAE)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SAE))
wiphy->features |= NL80211_FEATURE_SAE;
#ifdef CONFIG_PM
@@ -1205,7 +1171,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
regdomain_is_known = isalpha(mac->rd->alpha2[0]) &&
isalpha(mac->rd->alpha2[1]);
- if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_REG_UPDATE)) {
wiphy->reg_notifier = qtnf_cfg80211_reg_notifier;
if (mac->rd->alpha2[0] == '9' && mac->rd->alpha2[1] == '9') {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index d0d7ec8794c4..ccc1e06dfcf6 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -11,11 +11,11 @@
#include "bus.h"
#include "commands.h"
+/* Let the device itself select the best values for current conditions */
#define QTNF_SCAN_TIME_AUTO 0
-/* Let device itself to select best values for current conditions */
-#define QTNF_SCAN_DWELL_ACTIVE_DEFAULT QTNF_SCAN_TIME_AUTO
-#define QTNF_SCAN_DWELL_PASSIVE_DEFAULT QTNF_SCAN_TIME_AUTO
+#define QTNF_SCAN_DWELL_ACTIVE_DEFAULT 90
+#define QTNF_SCAN_DWELL_PASSIVE_DEFAULT 100
#define QTNF_SCAN_SAMPLE_DURATION_DEFAULT QTNF_SCAN_TIME_AUTO
static int qtnf_cmd_check_reply_header(const struct qlink_resp *resp,
@@ -175,7 +175,8 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type,
{
struct qlink_tlv_ie_set *tlv;
- tlv = (struct qlink_tlv_ie_set *)skb_put(cmd_skb, sizeof(*tlv) + len);
+ tlv = (struct qlink_tlv_ie_set *)skb_put(cmd_skb, sizeof(*tlv) +
+ round_up(len, QLINK_ALIGN));
tlv->hdr.type = cpu_to_le16(QTN_TLV_ID_IE_SET);
tlv->hdr.len = cpu_to_le16(len + sizeof(*tlv) - sizeof(tlv->hdr));
tlv->type = frame_type;
@@ -190,20 +191,24 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
{
unsigned int len = sizeof(struct qlink_cmd_start_ap);
- len += s->ssid_len;
- len += s->beacon.head_len;
- len += s->beacon.tail_len;
- len += s->beacon.beacon_ies_len;
- len += s->beacon.proberesp_ies_len;
- len += s->beacon.assocresp_ies_len;
- len += s->beacon.probe_resp_len;
+ len += round_up(s->ssid_len, QLINK_ALIGN);
+ len += round_up(s->beacon.head_len, QLINK_ALIGN);
+ len += round_up(s->beacon.tail_len, QLINK_ALIGN);
+ len += round_up(s->beacon.beacon_ies_len, QLINK_ALIGN);
+ len += round_up(s->beacon.proberesp_ies_len, QLINK_ALIGN);
+ len += round_up(s->beacon.assocresp_ies_len, QLINK_ALIGN);
+ len += round_up(s->beacon.probe_resp_len, QLINK_ALIGN);
if (cfg80211_chandef_valid(&s->chandef))
len += sizeof(struct qlink_tlv_chandef);
- if (s->acl)
+ if (s->acl) {
+ unsigned int acl_len = struct_size(s->acl, mac_addrs,
+ s->acl->n_acl_entries);
+
len += sizeof(struct qlink_tlv_hdr) +
- struct_size(s->acl, mac_addrs, s->acl->n_acl_entries);
+ round_up(acl_len, QLINK_ALIGN);
+ }
if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) {
pr_err("VIF%u.%u: can not fit AP settings: %u\n",
@@ -315,7 +320,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
if (s->ht_cap) {
struct qlink_tlv_hdr *tlv = (struct qlink_tlv_hdr *)
- skb_put(cmd_skb, sizeof(*tlv) + sizeof(*s->ht_cap));
+ skb_put(cmd_skb, sizeof(*tlv) +
+ round_up(sizeof(*s->ht_cap), QLINK_ALIGN));
tlv->type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
tlv->len = cpu_to_le16(sizeof(*s->ht_cap));
@@ -339,7 +345,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
size_t acl_size = struct_size(s->acl, mac_addrs,
s->acl->n_acl_entries);
struct qlink_tlv_hdr *tlv =
- skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+ skb_put(cmd_skb,
+ sizeof(*tlv) + round_up(acl_size, QLINK_ALIGN));
tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
tlv->len = cpu_to_le16(acl_size);
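The round_up() additions in this file reserve each variable-length element padded to QLINK_ALIGN (4) bytes, while tlv->len still advertises the unpadded payload length. A worked example of the accounting (header size illustrative):

#include <stdio.h>

#define QLINK_ALIGN 4
/* Same as the kernel helper for power-of-two alignment. */
#define round_up(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int hdr = 4;		/* sizeof TLV header, illustrative */
	unsigned int ie_len = 13;	/* payload length carried in tlv->len */

	/* 13 bytes of payload occupy 16 bytes of buffer space:
	 * prints "reserve 20 bytes, advertise len 13" */
	printf("reserve %u bytes, advertise len %u\n",
	       hdr + round_up(ie_len, QLINK_ALIGN), ie_len);
	return 0;
}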
@@ -581,10 +588,10 @@ qtnf_sta_info_parse_flags(struct nl80211_sta_flag_update *dst,
}
static void
-qtnf_cmd_sta_info_parse(struct station_info *sinfo,
- const struct qlink_tlv_hdr *tlv,
+qtnf_cmd_sta_info_parse(struct station_info *sinfo, const u8 *data,
size_t resp_size)
{
+ const struct qlink_tlv_hdr *tlv;
const struct qlink_sta_stats *stats = NULL;
const u8 *map = NULL;
unsigned int map_len = 0;
@@ -595,11 +602,11 @@ qtnf_cmd_sta_info_parse(struct station_info *sinfo,
(qtnf_utils_is_bit_set(map, bitn, map_len) && \
(offsetofend(struct qlink_sta_stats, stat_name) <= stats_len))
- while (resp_size >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, data, resp_size) {
tlv_len = le16_to_cpu(tlv->len);
switch (le16_to_cpu(tlv->type)) {
- case QTN_TLV_ID_STA_STATS_MAP:
+ case QTN_TLV_ID_BITMAP:
map_len = tlv_len;
map = tlv->val;
break;
@@ -610,9 +617,11 @@ qtnf_cmd_sta_info_parse(struct station_info *sinfo,
default:
break;
}
+ }
- resp_size -= tlv_len + sizeof(*tlv);
- tlv = (const struct qlink_tlv_hdr *)(tlv->val + tlv_len);
+ if (!qlink_tlv_parsing_ok(tlv, data, resp_size)) {
+ pr_err("Malformed TLV buffer\n");
+ return;
}
if (!map || !stats)
@@ -736,9 +745,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
goto out;
}
- qtnf_cmd_sta_info_parse(sinfo,
- (const struct qlink_tlv_hdr *)resp->info,
- var_resp_len);
+ qtnf_cmd_sta_info_parse(sinfo, resp->info, var_resp_len);
out:
qtnf_bus_unlock(vif->mac->bus);
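qlink_for_each_tlv() and qlink_tlv_parsing_ok() are not part of this excerpt (they live in qlink_util.h); a plausible shape of the walk that replaces the open-coded loops, under the assumption that TLVs advance in QLINK_ALIGN steps:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define QLINK_ALIGN 4
#define round_up(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct tlv_hdr {		/* stand-in for struct qlink_tlv_hdr */
	uint16_t type;
	uint16_t len;		/* payload length; le16 on the wire */
};

/* Each TLV advances by header size plus its payload rounded up to
 * QLINK_ALIGN; parsing is "ok" when the cursor stops exactly at the
 * buffer end, which is what qlink_tlv_parsing_ok() verifies. */
static bool tlv_walk_ok(const uint8_t *buf, size_t len)
{
	const uint8_t *pos = buf, *end = buf + len;

	while ((size_t)(end - pos) >= sizeof(struct tlv_hdr)) {
		const struct tlv_hdr *tlv = (const void *)pos;
		size_t step = sizeof(*tlv) +
			      round_up((size_t)tlv->len, QLINK_ALIGN);

		if (step > (size_t)(end - pos))
			break;			/* truncated TLV */
		/* ... dispatch on tlv->type here ... */
		pos += step;
	}
	return pos == end;
}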
@@ -895,31 +902,21 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
const char *uboot_ver = NULL;
u32 hw_ver = 0;
u16 tlv_type;
- u16 tlv_value_len;
+ u16 tlv_len;
hwinfo->num_mac = resp->num_mac;
hwinfo->mac_bitmap = resp->mac_bitmap;
hwinfo->fw_ver = le32_to_cpu(resp->fw_ver);
- hwinfo->ql_proto_ver = le16_to_cpu(resp->ql_proto_ver);
hwinfo->total_tx_chain = resp->total_tx_chain;
hwinfo->total_rx_chain = resp->total_rx_chain;
- hwinfo->hw_capab = le32_to_cpu(resp->hw_capab);
bld_tmstamp = le32_to_cpu(resp->bld_tmstamp);
plat_id = le32_to_cpu(resp->plat_id);
hw_ver = le32_to_cpu(resp->hw_ver);
- tlv = (const struct qlink_tlv_hdr *)resp->info;
-
- while (info_len >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, resp->info, info_len) {
tlv_type = le16_to_cpu(tlv->type);
- tlv_value_len = le16_to_cpu(tlv->len);
-
- if (tlv_value_len + sizeof(*tlv) > info_len) {
- pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
- tlv_type, tlv_value_len);
- return -EINVAL;
- }
+ tlv_len = le16_to_cpu(tlv->len);
switch (tlv_type) {
case QTN_TLV_ID_BUILD_NAME:
@@ -943,36 +940,43 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
case QTN_TLV_ID_UBOOT_VER:
uboot_ver = (const void *)tlv->val;
break;
- case QTN_TLV_ID_MAX_SCAN_SSIDS:
- hwinfo->max_scan_ssids = *tlv->val;
+ case QTN_TLV_ID_BITMAP:
+ memcpy(hwinfo->hw_capab, tlv->val,
+ min(sizeof(hwinfo->hw_capab), (size_t)tlv_len));
break;
default:
break;
}
+ }
- info_len -= tlv_value_len + sizeof(*tlv);
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
- }
-
- pr_info("fw_version=%d, MACs map %#x, chains Tx=%u Rx=%u, capab=0x%x\n",
- hwinfo->fw_ver, hwinfo->mac_bitmap,
- hwinfo->total_tx_chain, hwinfo->total_rx_chain,
- hwinfo->hw_capab);
-
- pr_info("\nBuild name: %s" \
- "\nBuild revision: %s" \
- "\nBuild type: %s" \
- "\nBuild label: %s" \
- "\nBuild timestamp: %lu" \
- "\nPlatform ID: %lu" \
- "\nHardware ID: %s" \
- "\nCalibration version: %s" \
- "\nU-Boot version: %s" \
- "\nHardware version: 0x%08x\n",
+ if (!qlink_tlv_parsing_ok(tlv, resp->info, info_len)) {
+ pr_err("Malformed TLV buffer\n");
+ return -EINVAL;
+ }
+
+ pr_info("\nBuild name: %s\n"
+ "Build revision: %s\n"
+ "Build type: %s\n"
+ "Build label: %s\n"
+ "Build timestamp: %lu\n"
+ "Platform ID: %lu\n"
+ "Hardware ID: %s\n"
+ "Calibration version: %s\n"
+ "U-Boot version: %s\n"
+ "Hardware version: 0x%08x\n"
+ "Qlink ver: %u.%u\n"
+ "MACs map: %#x\n"
+ "Chains Rx-Tx: %ux%u\n"
+ "FW version: 0x%x\n",
bld_name, bld_rev, bld_type, bld_label,
(unsigned long)bld_tmstamp,
(unsigned long)plat_id,
- hw_id, calibration_ver, uboot_ver, hw_ver);
+ hw_id, calibration_ver, uboot_ver, hw_ver,
+ QLINK_VER_MAJOR(bus->hw_info.ql_proto_ver),
+ QLINK_VER_MINOR(bus->hw_info.ql_proto_ver),
+ hwinfo->mac_bitmap,
+ hwinfo->total_rx_chain, hwinfo->total_tx_chain,
+ hwinfo->fw_ver);
strlcpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
hwinfo->hw_version = hw_ver;
@@ -1016,18 +1020,15 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const struct qlink_resp_get_mac_info *resp,
size_t tlv_buf_size)
{
- const u8 *tlv_buf = resp->var_info;
- struct ieee80211_iface_combination *comb = NULL;
+ struct ieee80211_iface_combination *comb = mac->macinfo.if_comb;
size_t n_comb = 0;
struct ieee80211_iface_limit *limits;
- const struct qlink_iface_comb_num *comb_num;
const struct qlink_iface_limit_record *rec;
const struct qlink_iface_limit *lim;
const struct qlink_wowlan_capab_data *wowlan;
u16 rec_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
u8 *ext_capa = NULL;
u8 *ext_capa_mask = NULL;
@@ -1066,44 +1067,11 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
break;
}
- tlv = (const struct qlink_tlv_hdr *)tlv_buf;
- while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) {
+ qlink_for_each_tlv(tlv, resp->var_info, tlv_buf_size) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
- if (tlv_full_len > tlv_buf_size) {
- pr_warn("MAC%u: malformed TLV 0x%.2X; LEN: %u\n",
- mac->macid, tlv_type, tlv_value_len);
- return -EINVAL;
- }
switch (tlv_type) {
- case QTN_TLV_ID_NUM_IFACE_COMB:
- if (tlv_value_len != sizeof(*comb_num))
- return -EINVAL;
-
- comb_num = (void *)tlv->val;
-
- /* free earlier iface comb memory */
- qtnf_mac_iface_comb_free(mac);
-
- mac->macinfo.n_if_comb =
- le32_to_cpu(comb_num->iface_comb_num);
-
- mac->macinfo.if_comb =
- kcalloc(mac->macinfo.n_if_comb,
- sizeof(*mac->macinfo.if_comb),
- GFP_KERNEL);
-
- if (!mac->macinfo.if_comb)
- return -ENOMEM;
-
- comb = mac->macinfo.if_comb;
-
- pr_debug("MAC%u: %zu iface combinations\n",
- mac->macid, mac->macinfo.n_if_comb);
-
- break;
case QTN_TLV_ID_IFACE_LIMIT:
if (unlikely(!comb)) {
pr_warn("MAC%u: no combinations advertised\n",
@@ -1207,14 +1175,10 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
mac->macid, tlv_type);
break;
}
-
- tlv_buf_size -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (tlv_buf_size) {
- pr_warn("MAC%u: malformed TLV buf; bytes left: %zu\n",
- mac->macid, tlv_buf_size);
+ if (!qlink_tlv_parsing_ok(tlv, resp->var_info, tlv_buf_size)) {
+ pr_err("Malformed TLV buffer\n");
return -EINVAL;
}
@@ -1260,13 +1224,15 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
return 0;
}
-static void
+static int
qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
const struct qlink_resp_get_mac_info *resp_info)
{
struct qtnf_mac_info *mac_info;
struct qtnf_vif *vif;
+ qtnf_mac_iface_comb_free(mac);
+
mac_info = &mac->macinfo;
mac_info->bands_cap = resp_info->bands_cap;
@@ -1285,12 +1251,28 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
mac_info->radar_detect_widths =
qlink_chan_width_mask_to_nl(le16_to_cpu(
resp_info->radar_detect_widths));
- mac_info->max_acl_mac_addrs = le32_to_cpu(resp_info->max_acl_mac_addrs);
+ mac_info->max_acl_mac_addrs = le16_to_cpu(resp_info->max_acl_mac_addrs);
+ mac_info->frag_thr = le32_to_cpu(resp_info->frag_threshold);
+ mac_info->rts_thr = le32_to_cpu(resp_info->rts_threshold);
+ mac_info->sretry_limit = resp_info->retry_short;
+ mac_info->lretry_limit = resp_info->retry_long;
+ mac_info->coverage_class = resp_info->coverage_class;
+ mac_info->max_scan_ssids = resp_info->max_scan_ssids;
memcpy(&mac_info->ht_cap_mod_mask, &resp_info->ht_cap_mod_mask,
sizeof(mac_info->ht_cap_mod_mask));
memcpy(&mac_info->vht_cap_mod_mask, &resp_info->vht_cap_mod_mask,
sizeof(mac_info->vht_cap_mod_mask));
+
+ mac_info->n_if_comb = resp_info->n_iface_combinations;
+	mac_info->if_comb = kcalloc(mac_info->n_if_comb,
+				    sizeof(*mac_info->if_comb),
+				    GFP_KERNEL);
+
+	if (!mac_info->if_comb)
+		return -ENOMEM;
+
+ return 0;
}
static void qtnf_cmd_resp_band_fill_htcap(const u8 *info,
@@ -1389,7 +1371,6 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
size_t payload_len)
{
u16 tlv_type;
- size_t tlv_len;
size_t tlv_dlen;
const struct qlink_tlv_hdr *tlv;
const struct qlink_channel *qchan;
@@ -1424,24 +1405,15 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
return -ENOMEM;
}
- tlv = (struct qlink_tlv_hdr *)resp->info;
-
- while (payload_len >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, resp->info, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_dlen = le16_to_cpu(tlv->len);
- tlv_len = tlv_dlen + sizeof(*tlv);
-
- if (tlv_len > payload_len) {
- pr_warn("malformed TLV 0x%.2X; LEN: %zu\n",
- tlv_type, tlv_len);
- goto error_ret;
- }
switch (tlv_type) {
case QTN_TLV_ID_CHANNEL:
if (unlikely(tlv_dlen != sizeof(*qchan))) {
pr_err("invalid channel TLV len %zu\n",
- tlv_len);
+ tlv_dlen);
goto error_ret;
}
@@ -1544,13 +1516,10 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
pr_warn("unknown TLV type: %#x\n", tlv_type);
break;
}
-
- payload_len -= tlv_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_dlen);
}
- if (payload_len) {
- pr_err("malformed TLV buf; bytes left: %zu\n", payload_len);
+ if (!qlink_tlv_parsing_ok(tlv, resp->info, payload_len)) {
+ pr_err("Malformed TLV buffer\n");
goto error_ret;
}
@@ -1570,128 +1539,6 @@ error_ret:
return ret;
}
-static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac,
- const u8 *payload, size_t payload_len)
-{
- struct qtnf_mac_info *mac_info;
- struct qlink_tlv_frag_rts_thr *phy_thr;
- struct qlink_tlv_rlimit *limit;
- struct qlink_tlv_cclass *class;
- u16 tlv_type;
- u16 tlv_value_len;
- size_t tlv_full_len;
- const struct qlink_tlv_hdr *tlv;
-
- mac_info = &mac->macinfo;
-
- tlv = (struct qlink_tlv_hdr *)payload;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
- tlv_type = le16_to_cpu(tlv->type);
- tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (tlv_full_len > payload_len) {
- pr_warn("MAC%u: malformed TLV 0x%.2X; LEN: %u\n",
- mac->macid, tlv_type, tlv_value_len);
- return -EINVAL;
- }
-
- switch (tlv_type) {
- case QTN_TLV_ID_FRAG_THRESH:
- phy_thr = (void *)tlv;
- mac_info->frag_thr = le32_to_cpu(phy_thr->thr);
- break;
- case QTN_TLV_ID_RTS_THRESH:
- phy_thr = (void *)tlv;
- mac_info->rts_thr = le32_to_cpu(phy_thr->thr);
- break;
- case QTN_TLV_ID_SRETRY_LIMIT:
- limit = (void *)tlv;
- mac_info->sretry_limit = limit->rlimit;
- break;
- case QTN_TLV_ID_LRETRY_LIMIT:
- limit = (void *)tlv;
- mac_info->lretry_limit = limit->rlimit;
- break;
- case QTN_TLV_ID_COVERAGE_CLASS:
- class = (void *)tlv;
- mac_info->coverage_class = class->cclass;
- break;
- default:
- pr_err("MAC%u: Unknown TLV type: %#x\n", mac->macid,
- le16_to_cpu(tlv->type));
- break;
- }
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
- }
-
- if (payload_len) {
- pr_warn("MAC%u: malformed TLV buf; bytes left: %zu\n",
- mac->macid, payload_len);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int
-qtnf_cmd_resp_proc_chan_stat_info(struct qtnf_chan_stats *stats,
- const u8 *payload, size_t payload_len)
-{
- struct qlink_chan_stats *qlink_stats;
- const struct qlink_tlv_hdr *tlv;
- size_t tlv_full_len;
- u16 tlv_value_len;
- u16 tlv_type;
-
- tlv = (struct qlink_tlv_hdr *)payload;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
- tlv_type = le16_to_cpu(tlv->type);
- tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
- if (tlv_full_len > payload_len) {
- pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
- tlv_type, tlv_value_len);
- return -EINVAL;
- }
- switch (tlv_type) {
- case QTN_TLV_ID_CHANNEL_STATS:
- if (unlikely(tlv_value_len != sizeof(*qlink_stats))) {
- pr_err("invalid CHANNEL_STATS entry size\n");
- return -EINVAL;
- }
-
- qlink_stats = (void *)tlv->val;
-
- stats->chan_num = le32_to_cpu(qlink_stats->chan_num);
- stats->cca_tx = le32_to_cpu(qlink_stats->cca_tx);
- stats->cca_rx = le32_to_cpu(qlink_stats->cca_rx);
- stats->cca_busy = le32_to_cpu(qlink_stats->cca_busy);
- stats->cca_try = le32_to_cpu(qlink_stats->cca_try);
- stats->chan_noise = qlink_stats->chan_noise;
-
- pr_debug("chan(%u) try(%u) busy(%u) noise(%d)\n",
- stats->chan_num, stats->cca_try,
- stats->cca_busy, stats->chan_noise);
- break;
- default:
- pr_warn("Unknown TLV type: %#x\n",
- le16_to_cpu(tlv->type));
- }
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
- }
-
- if (payload_len) {
- pr_warn("malformed TLV buf; bytes left: %zu\n", payload_len);
- return -EINVAL;
- }
-
- return 0;
-}
-
int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
@@ -1712,7 +1559,10 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
goto out;
resp = (const struct qlink_resp_get_mac_info *)resp_skb->data;
- qtnf_cmd_resp_proc_mac_info(mac, resp);
+ ret = qtnf_cmd_resp_proc_mac_info(mac, resp);
+ if (ret)
+ goto out;
+
ret = qtnf_parse_variable_mac_info(mac, resp, var_data_len);
out:
@@ -1793,35 +1643,6 @@ out:
return ret;
}
-int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
-{
- struct sk_buff *cmd_skb, *resp_skb = NULL;
- struct qlink_resp_phy_params *resp;
- size_t response_size = 0;
- int ret = 0;
-
- cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
- QLINK_CMD_PHY_PARAMS_GET,
- sizeof(struct qlink_cmd));
- if (!cmd_skb)
- return -ENOMEM;
-
- qtnf_bus_lock(mac->bus);
- ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
- sizeof(*resp), &response_size);
- if (ret)
- goto out;
-
- resp = (struct qlink_resp_phy_params *)resp_skb->data;
- ret = qtnf_cmd_resp_proc_phy_params(mac, resp->info, response_size);
-
-out:
- qtnf_bus_unlock(mac->bus);
- consume_skb(resp_skb);
-
- return ret;
-}
-
int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
@@ -1843,16 +1664,16 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_RTS_THRESH,
wiphy->rts_threshold);
if (changed & WIPHY_PARAM_COVERAGE_CLASS)
- qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
- wiphy->coverage_class);
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
+ wiphy->coverage_class);
if (changed & WIPHY_PARAM_RETRY_LONG)
- qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_LRETRY_LIMIT,
- wiphy->retry_long);
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_LRETRY_LIMIT,
+ wiphy->retry_long);
if (changed & WIPHY_PARAM_RETRY_SHORT)
- qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_SRETRY_LIMIT,
- wiphy->retry_short);
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_SRETRY_LIMIT,
+ wiphy->retry_short);
ret = qtnf_cmd_send(mac->bus, cmd_skb);
if (ret)
@@ -1866,23 +1687,35 @@ out:
int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
{
+ struct sk_buff *resp_skb = NULL;
+ struct qlink_resp_init_fw *resp;
+ struct qlink_cmd_init_fw *cmd;
struct sk_buff *cmd_skb;
- int ret = 0;
+ size_t info_len = 0;
+ int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
QLINK_CMD_FW_INIT,
- sizeof(struct qlink_cmd));
+ sizeof(*cmd));
if (!cmd_skb)
return -ENOMEM;
+ cmd = (struct qlink_cmd_init_fw *)cmd_skb->data;
+ cmd->qlink_proto_ver = cpu_to_le32(QLINK_PROTO_VER);
+
qtnf_bus_lock(bus);
- ret = qtnf_cmd_send(bus, cmd_skb);
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
+ sizeof(*resp), &info_len);
+ qtnf_bus_unlock(bus);
+
if (ret)
goto out;
-out:
- qtnf_bus_unlock(bus);
+ resp = (struct qlink_resp_init_fw *)resp_skb->data;
+ bus->hw_info.ql_proto_ver = le32_to_cpu(resp->qlink_proto_ver);
+out:
+ consume_skb(resp_skb);
return ret;
}
@@ -2178,108 +2011,90 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN);
}
-static void qtnf_cmd_scan_set_dwell(struct qtnf_wmac *mac,
- struct sk_buff *cmd_skb)
+int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
{
struct cfg80211_scan_request *scan_req = mac->scan_req;
- u16 dwell_active = QTNF_SCAN_DWELL_ACTIVE_DEFAULT;
u16 dwell_passive = QTNF_SCAN_DWELL_PASSIVE_DEFAULT;
- u16 duration = QTNF_SCAN_SAMPLE_DURATION_DEFAULT;
-
- if (scan_req->duration) {
- dwell_active = scan_req->duration;
- dwell_passive = scan_req->duration;
- }
-
- pr_debug("MAC%u: %s scan dwell active=%u, passive=%u, duration=%u\n",
- mac->macid,
- scan_req->duration_mandatory ? "mandatory" : "max",
- dwell_active, dwell_passive, duration);
-
- qtnf_cmd_skb_put_tlv_u16(cmd_skb,
- QTN_TLV_ID_SCAN_DWELL_ACTIVE,
- dwell_active);
- qtnf_cmd_skb_put_tlv_u16(cmd_skb,
- QTN_TLV_ID_SCAN_DWELL_PASSIVE,
- dwell_passive);
- qtnf_cmd_skb_put_tlv_u16(cmd_skb,
- QTN_TLV_ID_SCAN_SAMPLE_DURATION,
- duration);
-}
-
-int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
-{
- struct sk_buff *cmd_skb;
+ u16 dwell_active = QTNF_SCAN_DWELL_ACTIVE_DEFAULT;
+ struct wireless_dev *wdev = scan_req->wdev;
struct ieee80211_channel *sc;
- struct cfg80211_scan_request *scan_req = mac->scan_req;
- int n_channels;
- int count = 0;
+ struct qlink_cmd_scan *cmd;
+ struct sk_buff *cmd_skb;
+ int n_channels = 0;
+ u64 flags = 0;
+ int count;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_SCAN,
- sizeof(struct qlink_cmd));
+ sizeof(*cmd));
if (!cmd_skb)
return -ENOMEM;
- qtnf_bus_lock(mac->bus);
+ cmd = (struct qlink_cmd_scan *)cmd_skb->data;
- if (scan_req->n_ssids != 0) {
- while (count < scan_req->n_ssids) {
- qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID,
- scan_req->ssids[count].ssid,
- scan_req->ssids[count].ssid_len);
- count++;
- }
+ if (scan_req->duration) {
+ dwell_active = scan_req->duration;
+ dwell_passive = scan_req->duration;
+ } else if (wdev->iftype == NL80211_IFTYPE_STATION &&
+ wdev->current_bss) {
+ /* let device select dwell based on traffic conditions */
+ dwell_active = QTNF_SCAN_TIME_AUTO;
+ dwell_passive = QTNF_SCAN_TIME_AUTO;
+ }
+
+ cmd->n_ssids = cpu_to_le16(scan_req->n_ssids);
+ for (count = 0; count < scan_req->n_ssids; ++count) {
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID,
+ scan_req->ssids[count].ssid,
+ scan_req->ssids[count].ssid_len);
}
if (scan_req->ie_len != 0)
qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_PROBE_REQ,
scan_req->ie, scan_req->ie_len);
- if (scan_req->n_channels) {
- n_channels = scan_req->n_channels;
- count = 0;
-
- while (n_channels != 0) {
- sc = scan_req->channels[count];
- if (sc->flags & IEEE80211_CHAN_DISABLED) {
- n_channels--;
- continue;
- }
+ for (count = 0; count < scan_req->n_channels; ++count) {
+ sc = scan_req->channels[count];
+ if (sc->flags & IEEE80211_CHAN_DISABLED)
+ continue;
- pr_debug("MAC%u: scan chan=%d, freq=%d, flags=%#x\n",
- mac->macid, sc->hw_value, sc->center_freq,
- sc->flags);
+ pr_debug("[MAC%u] scan chan=%d, freq=%d, flags=%#x\n",
+ mac->macid, sc->hw_value, sc->center_freq,
+ sc->flags);
- qtnf_cmd_channel_tlv_add(cmd_skb, sc);
- n_channels--;
- count++;
- }
+ qtnf_cmd_channel_tlv_add(cmd_skb, sc);
+ ++n_channels;
}
- qtnf_cmd_scan_set_dwell(mac, cmd_skb);
+ if (scan_req->flags & NL80211_SCAN_FLAG_FLUSH)
+ flags |= QLINK_SCAN_FLAG_FLUSH;
+
+ if (scan_req->duration_mandatory)
+ flags |= QLINK_SCAN_FLAG_DURATION_MANDATORY;
+
+ cmd->n_channels = cpu_to_le16(n_channels);
+ cmd->active_dwell = cpu_to_le16(dwell_active);
+ cmd->passive_dwell = cpu_to_le16(dwell_passive);
+ cmd->sample_duration = cpu_to_le16(QTNF_SCAN_SAMPLE_DURATION_DEFAULT);
+ cmd->flags = cpu_to_le64(flags);
+
+ pr_debug("[MAC%u] %s scan dwell active=%u passive=%u duration=%u\n",
+ mac->macid,
+ scan_req->duration_mandatory ? "mandatory" : "max",
+ dwell_active, dwell_passive,
+ QTNF_SCAN_SAMPLE_DURATION_DEFAULT);
if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n",
+ pr_debug("[MAC%u] scan with random addr=%pM, mask=%pM\n",
mac->macid,
scan_req->mac_addr, scan_req->mac_addr_mask);
-
qtnf_cmd_randmac_tlv_add(cmd_skb, scan_req->mac_addr,
scan_req->mac_addr_mask);
}
- if (scan_req->flags & NL80211_SCAN_FLAG_FLUSH) {
- pr_debug("MAC%u: flush cache before scan\n", mac->macid);
-
- qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH);
- }
-
+ qtnf_bus_lock(mac->bus);
ret = qtnf_cmd_send(mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(mac->bus);
return ret;
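Distilling the dwell selection above: an explicit request duration wins; otherwise an associated STA interface lets the firmware choose (QTNF_SCAN_TIME_AUTO, 0) so traffic is not disrupted, and everything else gets the new fixed 90/100 defaults. A sketch (units follow scan_req->duration):

#include <stdbool.h>
#include <stdint.h>

static void pick_dwell(uint16_t req_duration, bool sta_associated,
		       uint16_t *active, uint16_t *passive)
{
	if (req_duration) {			/* caller-specified dwell */
		*active = req_duration;
		*passive = req_duration;
	} else if (sta_associated) {		/* protect ongoing traffic */
		*active = 0;			/* QTNF_SCAN_TIME_AUTO */
		*passive = 0;
	} else {
		*active = 90;		/* QTNF_SCAN_DWELL_ACTIVE_DEFAULT */
		*passive = 100;		/* QTNF_SCAN_DWELL_PASSIVE_DEFAULT */
	}
}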
@@ -2552,8 +2367,91 @@ int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
return ret;
}
-int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
- struct qtnf_chan_stats *stats)
+static int
+qtnf_cmd_resp_proc_chan_stat_info(struct survey_info *survey,
+ const u8 *payload, size_t payload_len)
+{
+ const struct qlink_chan_stats *stats = NULL;
+ const struct qlink_tlv_hdr *tlv;
+ u16 tlv_value_len;
+ u16 tlv_type;
+ const u8 *map = NULL;
+ unsigned int map_len = 0;
+ unsigned int stats_len = 0;
+
+ qlink_for_each_tlv(tlv, payload, payload_len) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_value_len = le16_to_cpu(tlv->len);
+
+ switch (tlv_type) {
+ case QTN_TLV_ID_BITMAP:
+ map = tlv->val;
+ map_len = tlv_value_len;
+ break;
+ case QTN_TLV_ID_CHANNEL_STATS:
+ stats = (struct qlink_chan_stats *)tlv->val;
+ stats_len = tlv_value_len;
+ break;
+ default:
+ pr_info("Unknown TLV type: %#x\n", tlv_type);
+ break;
+ }
+ }
+
+ if (!qlink_tlv_parsing_ok(tlv, payload, payload_len)) {
+ pr_err("Malformed TLV buffer\n");
+ return -EINVAL;
+ }
+
+ if (!map || !stats)
+ return 0;
+
+#define qtnf_chan_stat_avail(stat_name, bitn) \
+ (qtnf_utils_is_bit_set(map, bitn, map_len) && \
+ (offsetofend(struct qlink_chan_stats, stat_name) <= stats_len))
+
+ if (qtnf_chan_stat_avail(time_on, QLINK_CHAN_STAT_TIME_ON)) {
+ survey->filled |= SURVEY_INFO_TIME;
+ survey->time = le64_to_cpu(stats->time_on);
+ }
+
+ if (qtnf_chan_stat_avail(time_tx, QLINK_CHAN_STAT_TIME_TX)) {
+ survey->filled |= SURVEY_INFO_TIME_TX;
+ survey->time_tx = le64_to_cpu(stats->time_tx);
+ }
+
+ if (qtnf_chan_stat_avail(time_rx, QLINK_CHAN_STAT_TIME_RX)) {
+ survey->filled |= SURVEY_INFO_TIME_RX;
+ survey->time_rx = le64_to_cpu(stats->time_rx);
+ }
+
+ if (qtnf_chan_stat_avail(cca_busy, QLINK_CHAN_STAT_CCA_BUSY)) {
+ survey->filled |= SURVEY_INFO_TIME_BUSY;
+ survey->time_busy = le64_to_cpu(stats->cca_busy);
+ }
+
+ if (qtnf_chan_stat_avail(cca_busy_ext, QLINK_CHAN_STAT_CCA_BUSY_EXT)) {
+ survey->filled |= SURVEY_INFO_TIME_EXT_BUSY;
+ survey->time_ext_busy = le64_to_cpu(stats->cca_busy_ext);
+ }
+
+ if (qtnf_chan_stat_avail(time_scan, QLINK_CHAN_STAT_TIME_SCAN)) {
+ survey->filled |= SURVEY_INFO_TIME_SCAN;
+ survey->time_scan = le64_to_cpu(stats->time_scan);
+ }
+
+ if (qtnf_chan_stat_avail(chan_noise, QLINK_CHAN_STAT_CHAN_NOISE)) {
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+ survey->noise = stats->chan_noise;
+ }
+
+#undef qtnf_chan_stat_avail
+
+ return 0;
+}
+
+int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u32 chan_freq,
+ struct survey_info *survey)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
struct qlink_cmd_get_chan_stats *cmd;
@@ -2567,22 +2465,30 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
if (!cmd_skb)
return -ENOMEM;
- qtnf_bus_lock(mac->bus);
-
cmd = (struct qlink_cmd_get_chan_stats *)cmd_skb->data;
- cmd->channel = cpu_to_le16(channel);
+ cmd->channel_freq = cpu_to_le32(chan_freq);
+ qtnf_bus_lock(mac->bus);
ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
sizeof(*resp), &var_data_len);
+ qtnf_bus_unlock(mac->bus);
+
if (ret)
goto out;
resp = (struct qlink_resp_get_chan_stats *)resp_skb->data;
- ret = qtnf_cmd_resp_proc_chan_stat_info(stats, resp->info,
+
+ if (le32_to_cpu(resp->chan_freq) != chan_freq) {
+ pr_err("[MAC%u] channel stats freq %u != requested %u\n",
+ mac->macid, le32_to_cpu(resp->chan_freq), chan_freq);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = qtnf_cmd_resp_proc_chan_stat_info(survey, resp->info,
var_data_len);
out:
- qtnf_bus_unlock(mac->bus);
consume_skb(resp_skb);
return ret;
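The qtnf_chan_stat_avail() macro above accepts a statistic only when the firmware both set its bit in the QTN_TLV_ID_BITMAP and sent enough bytes of the stats TLV to cover the field, which keeps older (shorter) firmware replies safe to parse. A standalone sketch of that guard:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct chan_stats {		/* stand-in for struct qlink_chan_stats */
	uint64_t time_on;	/* bytes 0..7  */
	uint64_t time_tx;	/* bytes 8..15 */
};

static bool stat_avail(bool bit_set, size_t stats_len, size_t end_off)
{
	return bit_set && end_off <= stats_len;
}

int main(void)
{
	/* Firmware sent only 8 bytes: time_tx must be rejected even if
	 * its bitmap bit is set. */
	assert(!stat_avail(true, 8, offsetofend(struct chan_stats, time_tx)));
	assert(stat_avail(true, 16, offsetofend(struct chan_stats, time_tx)));
	return 0;
}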
@@ -2595,6 +2501,7 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct qlink_cmd_chan_switch *cmd;
struct sk_buff *cmd_skb;
int ret;
+ u64 flags = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid,
QLINK_CMD_CHAN_SWITCH,
@@ -2602,19 +2509,19 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
if (!cmd_skb)
return -ENOMEM;
- qtnf_bus_lock(mac->bus);
+ if (params->radar_required)
+ flags |= QLINK_CHAN_SW_RADAR_REQUIRED;
+
+ if (params->block_tx)
+ flags |= QLINK_CHAN_SW_BLOCK_TX;
cmd = (struct qlink_cmd_chan_switch *)cmd_skb->data;
- cmd->channel = cpu_to_le16(params->chandef.chan->hw_value);
- cmd->radar_required = params->radar_required;
- cmd->block_tx = params->block_tx;
+ qlink_chandef_cfg2q(&params->chandef, &cmd->channel);
+ cmd->flags = cpu_to_le64(flags);
cmd->beacon_count = params->count;
+ qtnf_bus_lock(mac->bus);
ret = qtnf_cmd_send(mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(mac->bus);
return ret;
@@ -2695,7 +2602,7 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
if (!cmd_skb)
return -ENOMEM;
- tlv = skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+ tlv = skb_put(cmd_skb, sizeof(*tlv) + round_up(acl_size, QLINK_ALIGN));
tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
tlv->len = cpu_to_le16(acl_size);
qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index ab273257b078..9db695101d28 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -59,8 +59,8 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
bool up);
int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
bool slave_radar, bool dfs_offload);
-int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
- struct qtnf_chan_stats *stats);
+int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u32 chan_freq,
+ struct survey_info *survey);
int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct cfg80211_csa_settings *params);
int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 4320180f8c07..9e666fac8b5f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/if_ether.h>
+#include <linux/nospec.h>
#include "core.h"
#include "bus.h"
@@ -41,11 +42,12 @@ struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid)
{
struct qtnf_wmac *mac = NULL;
- if (unlikely(macid >= QTNF_MAX_MAC)) {
+ if (macid >= QTNF_MAX_MAC) {
pr_err("invalid MAC index %u\n", macid);
return NULL;
}
+ macid = array_index_nospec(macid, QTNF_MAX_MAC);
mac = bus->mac[macid];
if (unlikely(!mac)) {
@@ -497,7 +499,7 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev->tx_queue_len = 100;
dev->ethtool_ops = &qtnf_ethtool_ops;
- if (mac->bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE)
+ if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE))
dev->needed_tailroom = sizeof(struct qtnf_frame_meta_info);
qdev_vif = netdev_priv(dev);
@@ -585,16 +587,6 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
return PTR_ERR(mac);
}
- ret = qtnf_cmd_get_mac_info(mac);
- if (ret) {
- pr_err("MAC%u: failed to get info\n", macid);
- goto error;
- }
-
- /* Use MAC address of the first active radio as a unique device ID */
- if (is_zero_ether_addr(mac->bus->hw_id))
- ether_addr_copy(mac->bus->hw_id, mac->macaddr);
-
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not ready\n", macid);
@@ -609,12 +601,16 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error;
}
- ret = qtnf_cmd_send_get_phy_params(mac);
+ ret = qtnf_cmd_get_mac_info(mac);
if (ret) {
- pr_err("MAC%u: failed to get PHY settings\n", macid);
+ pr_err("MAC%u: failed to get MAC info\n", macid);
goto error_del_vif;
}
+ /* Use MAC address of the first active radio as a unique device ID */
+ if (is_zero_ether_addr(mac->bus->hw_id))
+ ether_addr_copy(mac->bus->hw_id, mac->macaddr);
+
ret = qtnf_mac_init_bands(mac);
if (ret) {
pr_err("MAC%u: failed to init bands\n", macid);
@@ -639,7 +635,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error_del_vif;
}
- if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ if (qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE)) {
ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
if (ret)
goto error;
@@ -705,7 +701,8 @@ static int qtnf_core_netdevice_event(struct notifier_block *nb,
info->linking ? "add" : "del");
if (IS_ENABLED(CONFIG_NET_SWITCHDEV) &&
- (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE)) {
+ qtnf_hwcap_is_set(&bus->hw_info,
+ QLINK_HW_CAPAB_HW_BRIDGE)) {
if (info->linking)
br_domain = brdev->ifindex;
else
@@ -756,6 +753,15 @@ int qtnf_core_attach(struct qtnf_bus *bus)
goto error;
}
+ if (QLINK_VER_MAJOR(bus->hw_info.ql_proto_ver) !=
+ QLINK_PROTO_VER_MAJOR) {
+ pr_err("qlink driver vs FW version mismatch: %u vs %u\n",
+ QLINK_PROTO_VER_MAJOR,
+ QLINK_VER_MAJOR(bus->hw_info.ql_proto_ver));
+ ret = -EPROTONOSUPPORT;
+ goto error;
+ }
+
bus->fw_state = QTNF_FW_STATE_ACTIVE;
ret = qtnf_cmd_get_hw_info(bus);
if (ret) {
@@ -763,14 +769,7 @@ int qtnf_core_attach(struct qtnf_bus *bus)
goto error;
}
- if (bus->hw_info.ql_proto_ver != QLINK_PROTO_VER) {
- pr_err("qlink version mismatch %u != %u\n",
- QLINK_PROTO_VER, bus->hw_info.ql_proto_ver);
- ret = -EPROTONOSUPPORT;
- goto error;
- }
-
- if ((bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) &&
+ if (qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE) &&
bus->bus_ops->data_tx_use_meta_set)
bus->bus_ops->data_tx_use_meta_set(bus, true);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index d715e1cd0006..b993f9ca14c5 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -23,6 +23,7 @@
#include "qlink.h"
#include "trans.h"
+#include "qlink_util.h"
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
@@ -83,7 +84,8 @@ struct qtnf_mac_info {
u8 sretry_limit;
u8 coverage_class;
u8 radar_detect_widths;
- u32 max_acl_mac_addrs;
+ u8 max_scan_ssids;
+ u16 max_acl_mac_addrs;
struct ieee80211_ht_cap ht_cap_mod_mask;
struct ieee80211_vht_cap vht_cap_mod_mask;
struct ieee80211_iface_combination *if_comb;
@@ -94,15 +96,6 @@ struct qtnf_mac_info {
struct wiphy_wowlan_support *wowlan;
};
-struct qtnf_chan_stats {
- u32 chan_num;
- u32 cca_tx;
- u32 cca_rx;
- u32 cca_busy;
- u32 cca_try;
- s8 chan_noise;
-};
-
struct qtnf_wmac {
u8 macid;
u8 wiphy_registered;
@@ -117,16 +110,15 @@ struct qtnf_wmac {
};
struct qtnf_hw_info {
- u16 ql_proto_ver;
+ u32 ql_proto_ver;
u8 num_mac;
u8 mac_bitmap;
u32 fw_ver;
- u32 hw_capab;
u8 total_tx_chain;
u8 total_rx_chain;
char fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
- u8 max_scan_ssids;
+ u8 hw_capab[QLINK_HW_CAPAB_NUM / BITS_PER_BYTE + 1];
};
struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
@@ -140,7 +132,6 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
const char *name, unsigned char name_assign_type);
void qtnf_main_work_queue(struct work_struct *work);
int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed);
-int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid);
struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb);
@@ -160,4 +151,11 @@ static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
return *((void **)netdev_priv(dev));
}
+static inline bool qtnf_hwcap_is_set(const struct qtnf_hw_info *info,
+ unsigned int bit)
+{
+ return qtnf_utils_is_bit_set(info->hw_capab, bit,
+ sizeof(info->hw_capab));
+}
+
#endif /* _QTN_FMAC_CORE_H_ */
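With hw_capab now a byte-array bitmap, QLINK_HW_CAPAB_NUM = 9 (values 0..8) gives an array of 9 / 8 + 1 = 2 bytes. A sketch of the lookup behind qtnf_hwcap_is_set(), assuming LSB-first numbering within each byte (the authoritative helper is qtnf_utils_is_bit_set(), not shown in this excerpt):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

static bool bit_is_set(const uint8_t *map, unsigned int bit, size_t len)
{
	return bit / BITS_PER_BYTE < len &&
	       (map[bit / BITS_PER_BYTE] & (1U << (bit % BITS_PER_BYTE)));
}

int main(void)
{
	uint8_t hw_capab[2] = { 0x00, 0x01 };	/* only bit 8 set */

	/* QLINK_HW_CAPAB_HW_BRIDGE == 8 -> byte 1, bit 0 */
	return !bit_is_set(hw_capab, 8, sizeof(hw_capab));
}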
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 51af93bdf06e..7e408b5c5549 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include "cfg80211.h"
#include "core.h"
@@ -25,7 +26,6 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
size_t payload_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
int ret = 0;
@@ -58,23 +58,17 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
sinfo->generation = vif->generation;
payload_len = len - sizeof(*sta_assoc);
- tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies;
- while (payload_len >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, sta_assoc->ies, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (tlv_full_len > payload_len) {
- ret = -EINVAL;
- goto out;
- }
if (tlv_type == QTN_TLV_ID_IE_SET) {
const struct qlink_tlv_ie_set *ie_set;
unsigned int ie_len;
- if (payload_len < sizeof(*ie_set)) {
+ if (tlv_value_len <
+ (sizeof(*ie_set) - sizeof(ie_set->hdr))) {
ret = -EINVAL;
goto out;
}
@@ -88,12 +82,10 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
sinfo->assoc_req_ies_len = ie_len;
}
}
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len) {
+ if (!qlink_tlv_parsing_ok(tlv, sta_assoc->ies, payload_len)) {
+ pr_err("Malformed TLV buffer\n");
ret = -EINVAL;
goto out;
}
@@ -153,7 +145,6 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
size_t payload_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
const u8 *rsp_ies = NULL;
size_t rsp_ies_len = 0;
@@ -235,24 +226,17 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
}
payload_len = len - sizeof(*join_info);
- tlv = (struct qlink_tlv_hdr *)join_info->ies;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
+ qlink_for_each_tlv(tlv, join_info->ies, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (payload_len < tlv_full_len) {
- pr_warn("invalid %u TLV\n", tlv_type);
- status = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto done;
- }
if (tlv_type == QTN_TLV_ID_IE_SET) {
const struct qlink_tlv_ie_set *ie_set;
unsigned int ie_len;
- if (payload_len < sizeof(*ie_set)) {
+ if (tlv_value_len <
+ (sizeof(*ie_set) - sizeof(ie_set->hdr))) {
pr_warn("invalid IE_SET TLV\n");
status = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto done;
@@ -275,15 +259,10 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
break;
}
}
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len)
- pr_warn("VIF%u.%u: unexpected remaining payload: %zu\n",
- vif->mac->macid, vif->vifid, payload_len);
-
+ if (!qlink_tlv_parsing_ok(tlv, join_info->ies, payload_len))
+ pr_warn("Malformed TLV buffer\n");
done:
cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, rsp_ies,
rsp_ies_len, status, GFP_KERNEL);
@@ -368,7 +347,6 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
size_t payload_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
const u8 *ies = NULL;
size_t ies_len = 0;
@@ -387,21 +365,17 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
}
payload_len = len - sizeof(*sr);
- tlv = (struct qlink_tlv_hdr *)sr->payload;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
+ qlink_for_each_tlv(tlv, sr->payload, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (tlv_full_len > payload_len)
- return -EINVAL;
if (tlv_type == QTN_TLV_ID_IE_SET) {
const struct qlink_tlv_ie_set *ie_set;
unsigned int ie_len;
- if (payload_len < sizeof(*ie_set))
+ if (tlv_value_len <
+ (sizeof(*ie_set) - sizeof(ie_set->hdr)))
return -EINVAL;
ie_set = (const struct qlink_tlv_ie_set *)tlv;
@@ -424,12 +398,9 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
ies_len = ie_len;
}
}
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len)
+ if (!qlink_tlv_parsing_ok(tlv, sr->payload, payload_len))
return -EINVAL;
bss = cfg80211_inform_bss(wiphy, channel, frame_type,
@@ -662,18 +633,20 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
int ret = -1;
u16 event_id;
u16 event_len;
+ u8 vifid;
event = (const struct qlink_event *)event_skb->data;
event_id = le16_to_cpu(event->event_id);
event_len = le16_to_cpu(event->mhdr.len);
- if (likely(event->vifid < QTNF_MAX_INTF)) {
- vif = &mac->iflist[event->vifid];
- } else {
+ if (event->vifid >= QTNF_MAX_INTF) {
pr_err("invalid vif(%u)\n", event->vifid);
return -EINVAL;
}
+ vifid = array_index_nospec(event->vifid, QTNF_MAX_INTF);
+ vif = &mac->iflist[vifid];
+
switch (event_id) {
case QLINK_EVENT_STA_ASSOCIATED:
ret = qtnf_event_handle_sta_assoc(mac, vif, (const void *)event,
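The event path above now clamps the firmware-supplied vifid with array_index_nospec() (from <linux/nospec.h>) after the bounds check, so a mispredicted branch cannot speculatively index past iflist[]. A userspace sketch of the same validate-then-clamp pattern:

#include <stddef.h>

#define NMAX 8

/* Stand-in for array_index_nospec(): yields the index when it is below
 * size and 0 otherwise, computed without a data-dependent branch. */
static inline size_t index_nospec(size_t index, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(index < size);

	return index & mask;
}

static int lookup(const int table[NMAX], size_t index)
{
	if (index >= NMAX)
		return -1;			/* architectural bounds check */
	index = index_nospec(index, NMAX);	/* speculation-safe clamp */
	return table[index];
}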
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 8e0d8018208a..dbb241106d8a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -593,7 +593,7 @@ static int qtnf_pcie_skb_send(struct qtnf_bus *bus, struct sk_buff *skb)
priv->tx_bd_w_index = i;
tx_done:
- if (ret && skb) {
+ if (ret) {
pr_err_ratelimited("drop skb\n");
if (skb->dev)
skb->dev->stats.tx_dropped++;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index b2edb03819d1..7ee1070f985f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -6,7 +6,20 @@
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 16
+#define QLINK_PROTO_VER_MAJOR_M 0xFFFF
+#define QLINK_PROTO_VER_MAJOR_S 16
+#define QLINK_PROTO_VER_MINOR_M 0xFFFF
+#define QLINK_VER_MINOR(_ver) ((_ver) & QLINK_PROTO_VER_MINOR_M)
+#define QLINK_VER_MAJOR(_ver) \
+ (((_ver) >> QLINK_PROTO_VER_MAJOR_S) & QLINK_PROTO_VER_MAJOR_M)
+#define QLINK_VER(_maj, _min) (((_maj) << QLINK_PROTO_VER_MAJOR_S) | (_min))
+
+#define QLINK_PROTO_VER_MAJOR 18
+#define QLINK_PROTO_VER_MINOR 0
+#define QLINK_PROTO_VER \
+ QLINK_VER(QLINK_PROTO_VER_MAJOR, QLINK_PROTO_VER_MINOR)
+
+#define QLINK_ALIGN 4
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
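Worked example of the new version packing: QLINK_VER(18, 0) is (18 << 16) | 0 = 0x00120000, and the halves unpack with QLINK_VER_MAJOR()/QLINK_VER_MINOR(). A standalone check (macros copied with shortened names):

#include <assert.h>
#include <stdint.h>

#define VER_MAJOR_M 0xFFFF
#define VER_MAJOR_S 16
#define VER_MINOR_M 0xFFFF
#define VER(maj, min) (((maj) << VER_MAJOR_S) | (min))
#define VER_MINOR(v)  ((v) & VER_MINOR_M)
#define VER_MAJOR(v)  (((v) >> VER_MAJOR_S) & VER_MAJOR_M)

int main(void)
{
	uint32_t v = VER(18, 0);

	assert(v == 0x00120000);
	assert(VER_MAJOR(v) == 18 && VER_MINOR(v) == 0);
	return 0;
}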
@@ -62,15 +75,24 @@ struct qlink_msg_header {
* @QLINK_HW_CAPAB_HW_BRIDGE: device has hardware switch capabilities.
*/
enum qlink_hw_capab {
- QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
- QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1),
- QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2),
- QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3),
- QLINK_HW_CAPAB_PWR_MGMT = BIT(4),
- QLINK_HW_CAPAB_OBSS_SCAN = BIT(5),
- QLINK_HW_CAPAB_SCAN_DWELL = BIT(6),
- QLINK_HW_CAPAB_SAE = BIT(8),
- QLINK_HW_CAPAB_HW_BRIDGE = BIT(9),
+ QLINK_HW_CAPAB_REG_UPDATE = 0,
+ QLINK_HW_CAPAB_STA_INACT_TIMEOUT,
+ QLINK_HW_CAPAB_DFS_OFFLOAD,
+ QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR,
+ QLINK_HW_CAPAB_PWR_MGMT,
+ QLINK_HW_CAPAB_OBSS_SCAN,
+ QLINK_HW_CAPAB_SCAN_DWELL,
+ QLINK_HW_CAPAB_SAE,
+ QLINK_HW_CAPAB_HW_BRIDGE,
+ QLINK_HW_CAPAB_NUM
+};
+
+/**
+ * enum qlink_driver_capab - host driver capabilities.
+ */
+enum qlink_driver_capab {
+ QLINK_DRV_CAPAB_NUM = 0
};
enum qlink_iface_type {
@@ -164,7 +186,7 @@ struct qlink_chandef {
__le16 center_freq1;
__le16 center_freq2;
u8 width;
- u8 rsvd;
+ u8 rsvd[3];
} __packed;
#define QLINK_MAX_NR_CIPHER_SUITES 5
@@ -269,7 +291,6 @@ enum qlink_cmd_type {
QLINK_CMD_REGISTER_MGMT = 0x0003,
QLINK_CMD_SEND_FRAME = 0x0004,
QLINK_CMD_MGMT_SET_APPIE = 0x0005,
- QLINK_CMD_PHY_PARAMS_GET = 0x0011,
QLINK_CMD_PHY_PARAMS_SET = 0x0012,
QLINK_CMD_GET_HW_INFO = 0x0013,
QLINK_CMD_MAC_INFO = 0x0014,
@@ -321,9 +342,26 @@ struct qlink_cmd {
struct qlink_msg_header mhdr;
__le16 cmd_id;
__le16 seq_num;
- u8 rsvd[2];
u8 macid;
u8 vifid;
+ u8 rsvd[2];
+} __packed;
+
+/**
+ * struct qlink_cmd_init_fw - data for QLINK_CMD_FW_INIT
+ *
+ * Initialize the firmware based on the specified host configuration. This is
+ * the first command sent to the wifi card, and its fixed part must never
+ * change; any additions must be made by appending TLVs.
+ * If the wifi card cannot operate with the specified parameters, it returns
+ * an error.
+ *
+ * @qlink_proto_ver: QLINK protocol version used by the host driver.
+ */
+struct qlink_cmd_init_fw {
+ struct qlink_cmd chdr;
+ __le32 qlink_proto_ver;
+ u8 var_info[0];
} __packed;
/**
@@ -368,6 +406,7 @@ struct qlink_cmd_mgmt_frame_register {
struct qlink_cmd chdr;
__le16 frame_type;
u8 do_register;
+ u8 rsvd[1];
} __packed;
/**
@@ -405,6 +444,7 @@ struct qlink_cmd_frame_tx {
struct qlink_cmd_get_sta_info {
struct qlink_cmd chdr;
u8 sta_addr[ETH_ALEN];
+ u8 rsvd[2];
} __packed;
/**
@@ -424,6 +464,7 @@ struct qlink_cmd_add_key {
u8 addr[ETH_ALEN];
__le32 cipher;
__le16 vlanid;
+ u8 rsvd[2];
u8 key_data[0];
} __packed;
@@ -453,6 +494,7 @@ struct qlink_cmd_set_def_key {
u8 key_index;
u8 unicast;
u8 multicast;
+ u8 rsvd[1];
} __packed;
/**
@@ -463,6 +505,7 @@ struct qlink_cmd_set_def_key {
struct qlink_cmd_set_def_mgmt_key {
struct qlink_cmd chdr;
u8 key_index;
+ u8 rsvd[3];
} __packed;
/**
@@ -479,6 +522,7 @@ struct qlink_cmd_change_sta {
__le16 if_type;
__le16 vlanid;
u8 sta_addr[ETH_ALEN];
+ u8 rsvd[2];
} __packed;
/**
@@ -489,8 +533,9 @@ struct qlink_cmd_change_sta {
struct qlink_cmd_del_sta {
struct qlink_cmd chdr;
__le16 reason_code;
- u8 subtype;
u8 sta_addr[ETH_ALEN];
+ u8 subtype;
+ u8 rsvd[3];
} __packed;
enum qlink_sta_connect_flags {
@@ -557,6 +602,7 @@ struct qlink_cmd_external_auth {
struct qlink_cmd_disconnect {
struct qlink_cmd chdr;
__le16 reason;
+ u8 rsvd[2];
} __packed;
/**
@@ -568,6 +614,7 @@ struct qlink_cmd_disconnect {
struct qlink_cmd_updown {
struct qlink_cmd chdr;
u8 if_up;
+ u8 rsvd[3];
} __packed;
/**
@@ -591,16 +638,17 @@ enum qlink_band {
struct qlink_cmd_band_info_get {
struct qlink_cmd chdr;
u8 band;
+ u8 rsvd[3];
} __packed;
/**
* struct qlink_cmd_get_chan_stats - data for QLINK_CMD_CHAN_STATS command
*
- * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J
+ * @channel_freq: channel center frequency
*/
struct qlink_cmd_get_chan_stats {
struct qlink_cmd chdr;
- __le16 channel;
+ __le32 channel_freq;
} __packed;
/**
@@ -653,19 +701,33 @@ struct qlink_cmd_reg_notify {
} __packed;
/**
+ * enum qlink_chan_sw_flags - channel switch control flags
+ *
+ * @QLINK_CHAN_SW_RADAR_REQUIRED: whether radar detection is required on a new
+ * channel.
+ * @QLINK_CHAN_SW_BLOCK_TX: whether transmissions should be blocked while
+ * changing a channel.
+ */
+enum qlink_chan_sw_flags {
+ QLINK_CHAN_SW_RADAR_REQUIRED = BIT(0),
+ QLINK_CHAN_SW_BLOCK_TX = BIT(1),
+};
+
+/**
* struct qlink_cmd_chan_switch - data for QLINK_CMD_CHAN_SWITCH command
*
- * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J
- * @radar_required: whether radar detection is required on the new channel
- * @block_tx: whether transmissions should be blocked while changing
+ * @channel: channel to switch to.
+ * @flags: flags to control channel switch, bitmap of &enum qlink_chan_sw_flags.
* @beacon_count: number of beacons until switch
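+ * @n_counter_offsets_beacon: number of CSA counter offsets carried for
+ *	beacon frames.
+ * @n_counter_offsets_presp: number of CSA counter offsets carried for
+ *	probe responses.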
*/
struct qlink_cmd_chan_switch {
struct qlink_cmd chdr;
- __le16 channel;
- u8 radar_required;
- u8 block_tx;
+ struct qlink_chandef channel;
+ __le64 flags;
+ __le32 n_counter_offsets_beacon;
+ __le32 n_counter_offsets_presp;
u8 beacon_count;
+ u8 rsvd[3];
} __packed;
/**
@@ -769,6 +831,7 @@ struct qlink_cmd_pm_set {
struct qlink_cmd chdr;
__le32 pm_standby_timer;
u8 pm_mode;
+ u8 rsvd[3];
} __packed;
/**
@@ -857,6 +920,46 @@ struct qlink_cmd_ndev_changeupper {
u8 rsvd[1];
} __packed;
+/**
+ * enum qlink_scan_flags - scan request control flags
+ *
+ * Scan flags are used to control QLINK_CMD_SCAN behavior.
+ *
+ * @QLINK_SCAN_FLAG_FLUSH: flush cache before scanning.
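+ * @QLINK_SCAN_FLAG_DURATION_MANDATORY: the passed dwell time is mandatory
+ *	rather than an upper bound.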
+ */
+enum qlink_scan_flags {
+ QLINK_SCAN_FLAG_FLUSH = BIT(0),
+ QLINK_SCAN_FLAG_DURATION_MANDATORY = BIT(1),
+};
+
+/**
+ * struct qlink_cmd_scan - data for QLINK_CMD_SCAN command
+ *
+ * @flags: scan flags, a bitmap of &enum qlink_scan_flags.
+ * @n_ssids: number of WLAN_EID_SSID TLVs expected in the variable portion
+ *	of the command.
+ * @n_channels: number of QTN_TLV_ID_CHANNEL TLVs expected in the variable
+ *	payload.
+ * @active_dwell: time spent on a single channel for an active scan.
+ * @passive_dwell: time spent on a single channel for a passive scan.
+ * @sample_duration: total duration of sampling a single channel during a scan
+ * including off-channel dwell time and operating channel time.
+ * @bssid: specific BSSID to scan for or a broadcast BSSID.
+ * @scan_width: channel width to use, one of &enum qlink_channel_width.
+ */
+struct qlink_cmd_scan {
+ struct qlink_cmd chdr;
+ __le64 flags;
+ __le16 n_ssids;
+ __le16 n_channels;
+ __le16 active_dwell;
+ __le16 passive_dwell;
+ __le16 sample_duration;
+ u8 bssid[ETH_ALEN];
+ u8 scan_width;
+ u8 rsvd[3];
+ u8 var_info[0];
+} __packed;
+
/* QLINK Command Responses messages related definitions
*/
@@ -896,6 +999,16 @@ struct qlink_resp {
} __packed;
/**
+ * struct qlink_resp_init_fw - response for QLINK_CMD_FW_INIT
+ *
+ * @qlink_proto_ver: QLINK protocol version used by the wifi card firmware.
+ */
+struct qlink_resp_init_fw {
+ struct qlink_resp rhdr;
+ __le32 qlink_proto_ver;
+} __packed;
+
+/**
* enum qlink_dfs_regions - regulatory DFS regions
*
* Corresponds to &enum nl80211_dfs_regions.
@@ -919,6 +1032,7 @@ enum qlink_dfs_regions {
* @num_rx_chain: Number of receive chains used by WMAC.
* @vht_cap_mod_mask: mask specifying which VHT capabilities can be altered.
* @ht_cap_mod_mask: mask specifying which HT capabilities can be altered.
+ * @max_scan_ssids: maximum number of SSIDs the device can scan for in any scan.
* @bands_cap: wireless bands WMAC can operate in, bitmap of &enum qlink_band.
* @max_ap_assoc_sta: Maximum number of associations supported by WMAC.
* @radar_detect_widths: bitmask of channels BW for which WMAC can detect radar.
@@ -935,14 +1049,48 @@ struct qlink_resp_get_mac_info {
u8 num_rx_chain;
struct ieee80211_vht_cap vht_cap_mod_mask;
struct ieee80211_ht_cap ht_cap_mod_mask;
+
__le16 max_ap_assoc_sta;
+ __le32 hw_version;
+ __le32 probe_resp_offload;
+ __le32 bss_select_support;
+ __le16 n_addresses;
__le16 radar_detect_widths;
- __le32 max_acl_mac_addrs;
+ __le16 max_remain_on_channel_duration;
+ __le16 max_acl_mac_addrs;
+
+ __le32 frag_threshold;
+ __le32 rts_threshold;
+ u8 retry_short;
+ u8 retry_long;
+ u8 coverage_class;
+
+ u8 max_scan_ssids;
+ u8 max_sched_scan_reqs;
+ u8 max_sched_scan_ssids;
+ u8 max_match_sets;
+ u8 max_adj_channel_rssi_comp;
+
+ __le16 max_scan_ie_len;
+ __le16 max_sched_scan_ie_len;
+ __le32 max_sched_scan_plans;
+ __le32 max_sched_scan_plan_interval;
+ __le32 max_sched_scan_plan_iterations;
+
+ u8 n_cipher_suites;
+ u8 n_akm_suites;
+ u8 max_num_pmkids;
+ u8 num_iftype_ext_capab;
+ u8 extended_capabilities_len;
+ u8 max_data_retry_count;
+ u8 n_iface_combinations;
+ u8 max_num_csa_counters;
+
u8 bands_cap;
u8 alpha2[2];
u8 n_reg_rules;
u8 dfs_region;
- u8 rsvd[1];
+ u8 rsvd[3];
u8 var_info[0];
} __packed;
@@ -952,8 +1100,6 @@ struct qlink_resp_get_mac_info {
* Description of wireless hardware capabilities and features.
*
* @fw_ver: wireless hardware firmware version.
- * @hw_capab: Bitmap of capabilities supported by firmware.
- * @ql_proto_ver: Version of QLINK protocol used by firmware.
* @num_mac: Number of separate physical radio devices provided by hardware.
* @mac_bitmap: Bitmap of MAC IDs that are active and can be used in firmware.
* @total_tx_chains: total number of transmit chains used by device.
@@ -963,11 +1109,9 @@ struct qlink_resp_get_mac_info {
struct qlink_resp_get_hw_info {
struct qlink_resp rhdr;
__le32 fw_ver;
- __le32 hw_capab;
__le32 bld_tmstamp;
__le32 plat_id;
__le32 hw_ver;
- __le16 ql_proto_ver;
u8 num_mac;
u8 mac_bitmap;
u8 total_tx_chain;
@@ -1001,8 +1145,6 @@ enum qlink_sta_info_rate_flags {
*
* Response data containing statistics for specified STA.
*
- * @filled: a bitmask of &enum qlink_sta_info, specifies which info in response
- * is valid.
* @sta_addr: MAC address of STA the response carries statistic for.
* @info: variable statistics for specified STA.
*/
@@ -1031,22 +1173,14 @@ struct qlink_resp_band_info_get {
} __packed;
/**
- * struct qlink_resp_phy_params - response for QLINK_CMD_PHY_PARAMS_GET command
- *
- * @info: variable-length array of PHY params.
- */
-struct qlink_resp_phy_params {
- struct qlink_resp rhdr;
- u8 info[0];
-} __packed;
-
-/**
* struct qlink_resp_get_chan_stats - response for QLINK_CMD_CHAN_STATS cmd
*
+ * @chan_freq: center frequency of the channel the report is sent for.
* @info: variable-length channel info.
*/
struct qlink_resp_get_chan_stats {
- struct qlink_cmd rhdr;
+ struct qlink_resp rhdr;
+ __le32 chan_freq;
u8 info[0];
} __packed;
@@ -1158,6 +1292,7 @@ struct qlink_event_bss_join {
struct qlink_event_bss_leave {
struct qlink_event ehdr;
__le16 reason;
+ u8 rsvd[2];
} __packed;
/**
@@ -1274,10 +1409,10 @@ struct qlink_event_radar {
*/
struct qlink_event_external_auth {
struct qlink_event ehdr;
+ __le32 akm_suite;
u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len;
u8 bssid[ETH_ALEN];
- __le32 akm_suite;
+ u8 ssid_len;
u8 action;
} __packed;
@@ -1301,20 +1436,17 @@ struct qlink_event_mic_failure {
/**
* enum qlink_tlv_id - list of TLVs that Qlink messages can carry
*
- * @QTN_TLV_ID_STA_STATS_MAP: a bitmap of &enum qlink_sta_info, used to
- * indicate which statistic carried in QTN_TLV_ID_STA_STATS is valid.
+ * @QTN_TLV_ID_BITMAP: data representing a bitmap that is used together with
+ * other TLVs:
+ * &enum qlink_sta_info used to indicate which statistic carried in
+ * QTN_TLV_ID_STA_STATS is valid.
+ * &enum qlink_hw_capab listing wireless card capabilities.
+ * &enum qlink_driver_capab listing driver/host system capabilities.
+ * &enum qlink_chan_stat used to indicate which statistic carried in
+ * QTN_TLV_ID_CHANNEL_STATS is valid.
* @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
* &struct qlink_sta_stats. Valid values are marked as such in a bitmap
- * carried by QTN_TLV_ID_STA_STATS_MAP.
- * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
- * for in any given scan.
- * @QTN_TLV_ID_SCAN_DWELL_ACTIVE: time spent on a single channel for an active
- * scan.
- * @QTN_TLV_ID_SCAN_DWELL_PASSIVE: time spent on a single channel for a passive
- * scan.
- * @QTN_TLV_ID_SCAN_SAMPLE_DURATION: total duration of sampling a single channel
- * during a scan including off-channel dwell time and operating channel
- * time.
+ * carried by QTN_TLV_ID_BITMAP.
* @QTN_TLV_ID_IFTYPE_DATA: supported band data.
*/
enum qlink_tlv_id {
@@ -1325,11 +1457,10 @@ enum qlink_tlv_id {
QTN_TLV_ID_REG_RULE = 0x0207,
QTN_TLV_ID_CHANNEL = 0x020F,
QTN_TLV_ID_CHANDEF = 0x0210,
- QTN_TLV_ID_STA_STATS_MAP = 0x0211,
+ QTN_TLV_ID_BITMAP = 0x0211,
QTN_TLV_ID_STA_STATS = 0x0212,
QTN_TLV_ID_COVERAGE_CLASS = 0x0213,
QTN_TLV_ID_IFACE_LIMIT = 0x0214,
- QTN_TLV_ID_NUM_IFACE_COMB = 0x0215,
QTN_TLV_ID_CHANNEL_STATS = 0x0216,
QTN_TLV_ID_KEY = 0x0302,
QTN_TLV_ID_SEQ = 0x0303,
@@ -1344,13 +1475,8 @@ enum qlink_tlv_id {
QTN_TLV_ID_CALIBRATION_VER = 0x0406,
QTN_TLV_ID_UBOOT_VER = 0x0407,
QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408,
- QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409,
QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
- QTN_TLV_ID_SCAN_FLUSH = 0x0412,
- QTN_TLV_ID_SCAN_DWELL_ACTIVE = 0x0413,
- QTN_TLV_ID_SCAN_DWELL_PASSIVE = 0x0416,
- QTN_TLV_ID_SCAN_SAMPLE_DURATION = 0x0417,
QTN_TLV_ID_IFTYPE_DATA = 0x0418,
};
@@ -1360,10 +1486,6 @@ struct qlink_tlv_hdr {
u8 val[0];
} __packed;
-struct qlink_iface_comb_num {
- __le32 iface_comb_num;
-} __packed;
-
struct qlink_iface_limit {
__le16 max_num;
__le16 type;
@@ -1378,21 +1500,6 @@ struct qlink_iface_limit_record {
#define QLINK_RSSI_OFFSET 120
-struct qlink_tlv_frag_rts_thr {
- struct qlink_tlv_hdr hdr;
- __le32 thr;
-} __packed;
-
-struct qlink_tlv_rlimit {
- struct qlink_tlv_hdr hdr;
- u8 rlimit;
-} __packed;
-
-struct qlink_tlv_cclass {
- struct qlink_tlv_hdr hdr;
- u8 cclass;
-} __packed;
-
/**
* enum qlink_reg_rule_flags - regulatory rule flags
*
@@ -1510,6 +1617,7 @@ struct qlink_tlv_ie_set {
struct qlink_tlv_hdr hdr;
u8 type;
u8 flags;
+ u8 rsvd[2];
u8 ie_data[0];
} __packed;
@@ -1522,6 +1630,7 @@ struct qlink_tlv_ie_set {
struct qlink_tlv_ext_ie {
struct qlink_tlv_hdr hdr;
u8 eid_ext;
+ u8 rsvd[3];
u8 ie_data[0];
} __packed;
@@ -1546,13 +1655,57 @@ struct qlink_tlv_iftype_data {
struct qlink_sband_iftype_data iftype_data[0];
} __packed;
+/**
+ * enum qlink_chan_stat - channel statistics bitmap
+ *
+ * Used to indicate which statistics values in &struct qlink_chan_stats
+ * are valid. Individual values are used to fill a bitmap carried in a
+ * payload of QTN_TLV_ID_BITMAP.
+ *
+ * @QLINK_CHAN_STAT_TIME_ON: time_on value is valid.
+ * @QLINK_CHAN_STAT_TIME_TX: time_tx value is valid.
+ * @QLINK_CHAN_STAT_TIME_RX: time_rx value is valid.
+ * @QLINK_CHAN_STAT_CCA_BUSY: cca_busy value is valid.
+ * @QLINK_CHAN_STAT_CCA_BUSY_EXT: cca_busy_ext value is valid.
+ * @QLINK_CHAN_STAT_TIME_SCAN: time_scan value is valid.
+ * @QLINK_CHAN_STAT_CHAN_NOISE: chan_noise value is valid.
+ */
+enum qlink_chan_stat {
+ QLINK_CHAN_STAT_TIME_ON,
+ QLINK_CHAN_STAT_TIME_TX,
+ QLINK_CHAN_STAT_TIME_RX,
+ QLINK_CHAN_STAT_CCA_BUSY,
+ QLINK_CHAN_STAT_CCA_BUSY_EXT,
+ QLINK_CHAN_STAT_TIME_SCAN,
+ QLINK_CHAN_STAT_CHAN_NOISE,
+ QLINK_CHAN_STAT_NUM,
+};
+
+/**
+ * struct qlink_chan_stats - data for QTN_TLV_ID_CHANNEL_STATS
+ *
+ * Carries per-channel statistics. Not all fields may be filled with
+ * valid values. Valid fields should be indicated as such using a bitmap of
+ * &enum qlink_chan_stat. Bitmap is carried separately in a payload of
+ * QTN_TLV_ID_BITMAP.
+ *
+ * @time_on: amount of time the radio operated on that channel.
+ * @time_tx: amount of time the radio spent transmitting on the channel.
+ * @time_rx: amount of time the radio spent receiving on the channel.
+ * @cca_busy: amount of time the primary channel was busy.
+ * @cca_busy_ext: amount of time the secondary channel was busy.
+ * @time_scan: amount of time the radio spent scanning on the channel.
+ * @chan_noise: channel noise.
+ */
struct qlink_chan_stats {
- __le32 chan_num;
- __le32 cca_tx;
- __le32 cca_rx;
- __le32 cca_busy;
- __le32 cca_try;
+ __le64 time_on;
+ __le64 time_tx;
+ __le64 time_rx;
+ __le64 cca_busy;
+ __le64 cca_busy_ext;
+ __le64 time_scan;
s8 chan_noise;
+ u8 rsvd[3];
} __packed;
/**
@@ -1560,7 +1713,7 @@ struct qlink_chan_stats {
*
* Used to indicate which statistics values in &struct qlink_sta_stats
* are valid. Individual values are used to fill a bitmap carried in a
- * payload of QTN_TLV_ID_STA_STATS_MAP.
+ * payload of QTN_TLV_ID_BITMAP.
*
* @QLINK_STA_INFO_CONNECTED_TIME: connected_time value is valid.
* @QLINK_STA_INFO_INACTIVE_TIME: inactive_time value is valid.
@@ -1624,7 +1777,7 @@ struct qlink_sta_info_rate {
* Carries statistics of a STA. Not all fields may be filled with
* valid values. Valid fields should be indicated as such using a bitmap of
* &enum qlink_sta_info. Bitmap is carried separately in a payload of
- * QTN_TLV_ID_STA_STATS_MAP.
+ * QTN_TLV_ID_BITMAP.
*/
struct qlink_sta_stats {
__le64 rx_bytes;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index f873beed2ae7..230a10a41c7a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -20,42 +20,14 @@ static inline void qtnf_cmd_skb_put_tlv_arr(struct sk_buff *skb,
u16 tlv_id, const u8 arr[],
size_t arr_len)
{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + arr_len);
+ struct qlink_tlv_hdr *hdr;
+ hdr = skb_put(skb, sizeof(*hdr) + round_up(arr_len, QLINK_ALIGN));
hdr->type = cpu_to_le16(tlv_id);
hdr->len = cpu_to_le16(arr_len);
memcpy(hdr->val, arr, arr_len);
}
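Note that the skb reservation is now rounded up to the protocol alignment
while the reported TLV length is not; a quick sketch of the effect
(QLINK_ALIGN is defined in qlink.h and is assumed here to be 4):

	/* round_up(5, 4) == 8: a 5-byte array reserves sizeof(*hdr) + 8
	 * bytes in the skb, while hdr->len still carries the unpadded 5. */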
-static inline void qtnf_cmd_skb_put_tlv_tag(struct sk_buff *skb, u16 tlv_id)
-{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr));
-
- hdr->type = cpu_to_le16(tlv_id);
- hdr->len = cpu_to_le16(0);
-}
-
-static inline void qtnf_cmd_skb_put_tlv_u8(struct sk_buff *skb, u16 tlv_id,
- u8 value)
-{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
-
- hdr->type = cpu_to_le16(tlv_id);
- hdr->len = cpu_to_le16(sizeof(value));
- *hdr->val = value;
-}
-
-static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb,
- u16 tlv_id, u16 value)
-{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
- __le16 tmp = cpu_to_le16(value);
-
- hdr->type = cpu_to_le16(tlv_id);
- hdr->len = cpu_to_le16(sizeof(value));
- memcpy(hdr->val, &tmp, sizeof(tmp));
-}
-
static inline void qtnf_cmd_skb_put_tlv_u32(struct sk_buff *skb,
u16 tlv_id, u32 value)
{
@@ -85,4 +57,17 @@ u32 qlink_utils_chflags_cfg2q(u32 cfgflags);
void qlink_utils_regrule_q2nl(struct ieee80211_reg_rule *rule,
const struct qlink_tlv_reg_rule *tlv_rule);
+#define qlink_for_each_tlv(_tlv, _start, _datalen) \
+ for (_tlv = (const struct qlink_tlv_hdr *)(_start); \
+ (const u8 *)(_start) + (_datalen) - (const u8 *)_tlv >= \
+ (int)sizeof(*_tlv) && \
+ (const u8 *)(_start) + (_datalen) - (const u8 *)_tlv >= \
+ (int)sizeof(*_tlv) + le16_to_cpu(_tlv->len); \
+ _tlv = (const struct qlink_tlv_hdr *)(_tlv->val + \
+ round_up(le16_to_cpu(_tlv->len), QLINK_ALIGN)))
+
+#define qlink_tlv_parsing_ok(_tlv_last, _start, _datalen) \
+ ((const u8 *)(_tlv_last) == \
+ (const u8 *)(_start) + round_up(_datalen, QLINK_ALIGN))
+
#endif /* _QTN_FMAC_QLINK_UTIL_H_ */
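For context, a minimal sketch of how a variable payload might be walked with
the two helpers above (tlv, payload and payload_len are hypothetical locals,
not identifiers from the patch):

	const struct qlink_tlv_hdr *tlv;

	qlink_for_each_tlv(tlv, payload, payload_len) {
		switch (le16_to_cpu(tlv->type)) {
		case QTN_TLV_ID_BITMAP:
			/* consume le16_to_cpu(tlv->len) bytes at tlv->val */
			break;
		default:
			break;
		}
	}

	/* the loop stops early on a truncated TLV; verify full consumption */
	if (!qlink_tlv_parsing_ok(tlv, payload, payload_len))
		return -EINVAL;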
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index fda771d23f71..b6d1d71f4d30 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -41,7 +41,6 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta_vht_cap *ic_vht_cap;
const u8 *bssid = bss_conf->bssid;
u32 sound_dim;
- u8 bfee_role = RTW_BFEE_NONE;
u8 i;
if (!(chip->band & RTW_BAND_5G))
@@ -67,7 +66,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
}
ether_addr_copy(bfee->mac_addr, bssid);
- bfee_role = RTW_BFEE_MU;
+ bfee->role = RTW_BFEE_MU;
bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
bfee->aid = bss_conf->aid;
bfinfo->bfer_mu_cnt++;
@@ -85,7 +84,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
ether_addr_copy(bfee->mac_addr, bssid);
- bfee_role = RTW_BFEE_SU;
+ bfee->role = RTW_BFEE_SU;
bfee->sound_dim = (u8)sound_dim;
bfee->g_id = 0;
bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
@@ -102,7 +101,6 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
}
out_unlock:
- bfee->role = bfee_role;
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 4dfb2ec395ee..f91dc21a8bf1 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -1904,6 +1904,9 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
lockdep_assert_held(&rtwdev->mutex);
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ return;
+
coex_dm->reason = reason;
/* update wifi_link_info_ext variable */
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 243441453ead..6867bf29d4c8 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -136,6 +136,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
mutex_lock(&rtwdev->mutex);
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ goto unlock;
+
switch (c2h->id) {
case C2H_BT_INFO:
rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
@@ -153,6 +156,7 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
break;
}
+unlock:
mutex_unlock(&rtwdev->mutex);
}
@@ -745,7 +749,7 @@ static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
if (!loc) {
rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
- kfree(skb);
+ kfree_skb(skb);
return NULL;
}
nlo_hdr->location[i] = loc;
@@ -819,8 +823,7 @@ static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
return skb;
}
-static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_chip_info *chip = rtwdev->chip;
@@ -876,7 +879,7 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
skb_new = rtw_lps_pg_dpk_get(hw);
break;
case RSVD_LPS_PG_INFO:
- skb_new = rtw_lps_pg_info_get(hw, vif);
+ skb_new = rtw_lps_pg_info_get(hw);
break;
case RSVD_PROBE_REQ:
ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 85a81a578fd5..cad56389182c 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -193,6 +193,32 @@ rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
return ret;
}
+static inline u16
+rtw_read16_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 ret;
+
+ orig = rtw_read16(rtwdev, addr);
+ ret = (orig & mask) >> shift;
+
+ return ret;
+}
+
+static inline u8
+rtw_read8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 ret;
+
+ orig = rtw_read8(rtwdev, addr);
+ ret = (orig & mask) >> shift;
+
+ return ret;
+}
+
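These read-size variants mirror the existing rtw_read32_mask() pattern; a
hypothetical use (the address and mask are illustrative, not from the patch):

	/* extract a 2-bit field occupying bits 5:4 of a 16-bit register */
	u16 sel = rtw_read16_mask(rtwdev, 0x00f0, GENMASK(5, 4));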
static inline void
rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index cadf0abbe16b..7b245779ff90 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -17,10 +17,10 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
txsc20 = primary_ch_idx;
if (bw == RTW_CHANNEL_WIDTH_80) {
- if (txsc20 == 1 || txsc20 == 3)
- txsc40 = 9;
+ if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
+ txsc40 = RTW_SC_40_UPPER;
else
- txsc40 = 10;
+ txsc40 = RTW_SC_40_LOWER;
}
rtw_write8(rtwdev, REG_DATA_SC,
BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));
@@ -101,7 +101,7 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
}
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
- struct rtw_pwr_seq_cmd *cmd)
+ const struct rtw_pwr_seq_cmd *cmd)
{
u8 value;
u8 flag = 0;
@@ -139,9 +139,10 @@ static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
}
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
- u8 cut_mask, struct rtw_pwr_seq_cmd *cmd)
+ u8 cut_mask,
+ const struct rtw_pwr_seq_cmd *cmd)
{
- struct rtw_pwr_seq_cmd *cur_cmd;
+ const struct rtw_pwr_seq_cmd *cur_cmd;
u32 offset;
u8 value;
@@ -183,13 +184,13 @@ static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
}
static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
- struct rtw_pwr_seq_cmd **cmd_seq)
+ const struct rtw_pwr_seq_cmd **cmd_seq)
{
u8 cut_mask;
u8 intf_mask;
u8 cut;
u32 idx = 0;
- struct rtw_pwr_seq_cmd *cmd;
+ const struct rtw_pwr_seq_cmd *cmd;
int ret;
cut = rtwdev->hal.cut_version;
@@ -223,7 +224,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_pwr_seq_cmd **pwr_seq;
+ const struct rtw_pwr_seq_cmd **pwr_seq;
u8 rpwm;
bool cur_pwr;
@@ -705,7 +706,7 @@ dlfw_fail:
static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
- struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
+ const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
u32 prio_queues = 0;
if (queues & BIT(IEEE80211_AC_VO))
@@ -793,7 +794,7 @@ void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_rqpn *rqpn = NULL;
+ const struct rtw_rqpn *rqpn = NULL;
u16 txdma_pq_map = 0;
switch (rtw_hci_type(rtwdev)) {
@@ -882,7 +883,7 @@ static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_page_table *pg_tbl = NULL;
+ const struct rtw_page_table *pg_tbl = NULL;
u16 pubq_num;
int ret;
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 6fc33e11d08c..b3125e311fa2 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -75,15 +75,12 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
rtw_leave_lps_deep(rtwdev);
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- if (hw->conf.flags & IEEE80211_CONF_IDLE) {
- rtw_enter_ips(rtwdev);
- } else {
- ret = rtw_leave_ips(rtwdev);
- if (ret) {
- rtw_err(rtwdev, "failed to leave idle state\n");
- goto out;
- }
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ !(hw->conf.flags & IEEE80211_CONF_IDLE)) {
+ ret = rtw_leave_ips(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave idle state\n");
+ goto out;
}
}
@@ -99,6 +96,10 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
rtw_set_channel(rtwdev);
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ (hw->conf.flags & IEEE80211_CONF_IDLE))
+ rtw_enter_ips(rtwdev);
+
out:
mutex_unlock(&rtwdev->mutex);
return ret;
@@ -514,6 +515,9 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
/* suppress error messages */
return -EOPNOTSUPP;
default:
@@ -592,6 +596,20 @@ static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
+static bool rtw_ops_can_aggregate_in_amsdu(struct ieee80211_hw *hw,
+ struct sk_buff *head,
+ struct sk_buff *skb)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ /* we don't want to enable TX AMSDU on 2.4G */
+ if (hal->current_band_type == RTW_BAND_2G)
+ return false;
+
+ return true;
+}
+
static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
@@ -787,6 +805,7 @@ const struct ieee80211_ops rtw_ops = {
.sta_remove = rtw_ops_sta_remove,
.set_key = rtw_ops_set_key,
.ampdu_action = rtw_ops_ampdu_action,
+ .can_aggregate_in_amsdu = rtw_ops_can_aggregate_in_amsdu,
.sw_scan_start = rtw_ops_sw_scan_start,
.sw_scan_complete = rtw_ops_sw_scan_complete,
.mgd_prepare_tx = rtw_ops_mgd_prepare_tx,
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 2845d2838f7b..2f73820cd9ba 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -317,15 +317,15 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
bandwidth = RTW_CHANNEL_WIDTH_20;
- primary_chan_idx = 0;
+ primary_chan_idx = RTW_SC_DONT_CARE;
break;
case NL80211_CHAN_WIDTH_40:
bandwidth = RTW_CHANNEL_WIDTH_40;
if (primary_freq > center_freq) {
- primary_chan_idx = 1;
+ primary_chan_idx = RTW_SC_20_UPPER;
center_chan -= 2;
} else {
- primary_chan_idx = 2;
+ primary_chan_idx = RTW_SC_20_LOWER;
center_chan += 2;
}
break;
@@ -333,10 +333,10 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
bandwidth = RTW_CHANNEL_WIDTH_80;
if (primary_freq > center_freq) {
if (primary_freq - center_freq == 10) {
- primary_chan_idx = 1;
+ primary_chan_idx = RTW_SC_20_UPPER;
center_chan -= 2;
} else {
- primary_chan_idx = 3;
+ primary_chan_idx = RTW_SC_20_UPMOST;
center_chan -= 6;
}
/* assign the center channel used
@@ -345,10 +345,10 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan + 4;
} else {
if (center_freq - primary_freq == 10) {
- primary_chan_idx = 2;
+ primary_chan_idx = RTW_SC_20_LOWER;
center_chan += 2;
} else {
- primary_chan_idx = 4;
+ primary_chan_idx = RTW_SC_20_LOWEST;
center_chan += 6;
}
/* assign the center channel used
@@ -909,11 +909,16 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
clear_bit(RTW_FLAG_RUNNING, rtwdev->flags);
clear_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
+ mutex_unlock(&rtwdev->mutex);
+
+ cancel_work_sync(&rtwdev->c2h_work);
cancel_delayed_work_sync(&rtwdev->watch_dog_work);
cancel_delayed_work_sync(&coex->bt_relink_work);
cancel_delayed_work_sync(&coex->bt_reenable_work);
cancel_delayed_work_sync(&coex->defreeze_work);
+ mutex_lock(&rtwdev->mutex);
+
rtw_power_off(rtwdev);
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index f334d201bfb5..c074cef22120 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -99,6 +99,16 @@ enum rtw_bandwidth {
RTW_CHANNEL_WIDTH_10 = 6,
};
+enum rtw_sc_offset {
+ RTW_SC_DONT_CARE = 0,
+ RTW_SC_20_UPPER = 1,
+ RTW_SC_20_LOWER = 2,
+ RTW_SC_20_UPMOST = 3,
+ RTW_SC_20_LOWEST = 4,
+ RTW_SC_40_UPPER = 9,
+ RTW_SC_40_LOWER = 10,
+};
+
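A worked mapping for these names (illustrative frequencies): in an 80 MHz
block centered at 5210 MHz, a primary 20 MHz channel at 5200 MHz sits 10 MHz
below the center, so rtw_get_channel_params() resolves it to RTW_SC_20_LOWER;
a primary at 5180 MHz (30 MHz below the center) resolves to RTW_SC_20_LOWEST.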
enum rtw_net_type {
RTW_NET_NO_LINK = 0,
RTW_NET_AD_HOC = 1,
@@ -948,10 +958,10 @@ struct rtw_wow_param {
};
struct rtw_intf_phy_para_table {
- struct rtw_intf_phy_para *usb2_para;
- struct rtw_intf_phy_para *usb3_para;
- struct rtw_intf_phy_para *gen1_para;
- struct rtw_intf_phy_para *gen2_para;
+ const struct rtw_intf_phy_para *usb2_para;
+ const struct rtw_intf_phy_para *usb3_para;
+ const struct rtw_intf_phy_para *gen1_para;
+ const struct rtw_intf_phy_para *gen2_para;
u8 n_usb2_para;
u8 n_usb3_para;
u8 n_gen1_para;
@@ -1048,13 +1058,13 @@ struct rtw_chip_info {
/* init values */
u8 sys_func_en;
- struct rtw_pwr_seq_cmd **pwr_on_seq;
- struct rtw_pwr_seq_cmd **pwr_off_seq;
- struct rtw_rqpn *rqpn_table;
- struct rtw_page_table *page_table;
- struct rtw_intf_phy_para_table *intf_table;
+ const struct rtw_pwr_seq_cmd **pwr_on_seq;
+ const struct rtw_pwr_seq_cmd **pwr_off_seq;
+ const struct rtw_rqpn *rqpn_table;
+ const struct rtw_page_table *page_table;
+ const struct rtw_intf_phy_para_table *intf_table;
- struct rtw_hw_reg *dig;
+ const struct rtw_hw_reg *dig;
u32 rf_base_addr[2];
u32 rf_sipi_addr[2];
@@ -1500,7 +1510,7 @@ struct rtw_fifo_conf {
u16 rsvd_cpu_instr_addr;
u16 rsvd_fw_txbuf_addr;
u16 rsvd_csibuf_addr;
- struct rtw_rqpn *rqpn;
+ const struct rtw_rqpn *rqpn;
};
struct rtw_fw_state {
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 1fbc14c149ec..7c525bb0337d 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -1197,11 +1197,18 @@ static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
+ struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct pci_dev *pdev = rtwpci->pdev;
u16 link_ctrl;
int ret;
+ /* RTL8822CE has REFCLK auto-calibration enabled, so it does not
+ * need an extra clock delay to cover the REFCLK timing gap.
+ */
+ if (chip->id == RTW_CHIP_TYPE_8822C)
+ rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);
+
/* Though the standard PCIE configuration space provides the
* link control register, by Realtek's design the driver should
* check if the host supports CLKREQ/ASPM before enabling the HW module.
@@ -1248,7 +1255,7 @@ static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_intf_phy_para *para;
+ const struct rtw_intf_phy_para *para;
u16 cut;
u16 value;
u16 offset;
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 1580cfc57361..cd4fcd064cdb 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -39,6 +39,7 @@
#define RTK_PCIE_LINK_CFG 0x0719
#define BIT_CLKREQ_SW_EN BIT(4)
#define BIT_L1_SW_EN BIT(3)
+#define RTK_PCIE_CLKDLY_CTRL 0x0725
#define BIT_PCI_BCNQ_FLAG BIT(4)
#define RTK_PCI_TXBD_DESA_BCNQ 0x308
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index eea9d888fbf1..8793dd22188f 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -749,20 +749,10 @@ bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
direct_addr = base_addr[rf_path] + (addr << 2);
mask &= RFREG_MASK;
- if (addr == RF_CFGCH) {
- rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
- rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
- }
-
rtw_write32_mask(rtwdev, direct_addr, mask, data);
udelay(1);
- if (addr == RF_CFGCH) {
- rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
- rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
- }
-
return true;
}
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 4bc14b1a6340..96aa332fb28d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -645,7 +645,7 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
break;
case RTW_CHANNEL_WIDTH_40:
- if (primary_ch_idx == 1)
+ if (primary_ch_idx == RTW_SC_20_UPPER)
rtw_write32_set(rtwdev, REG_RXSB, BIT(4));
else
rtw_write32_clr(rtwdev, REG_RXSB, BIT(4));
@@ -1543,7 +1543,7 @@ static void rtw8822b_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
rtw_warn(rtwdev, "wrong bfee role\n");
}
-static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -1581,7 +1581,7 @@ static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
{0x0012,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -1714,7 +1714,7 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
{0x0003,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -1787,7 +1787,7 @@ static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -1905,26 +1905,26 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd *card_enable_flow_8822b[] = {
+static const struct rtw_pwr_seq_cmd *card_enable_flow_8822b[] = {
trans_carddis_to_cardemu_8822b,
trans_cardemu_to_act_8822b,
NULL
};
-static struct rtw_pwr_seq_cmd *card_disable_flow_8822b[] = {
+static const struct rtw_pwr_seq_cmd *card_disable_flow_8822b[] = {
trans_act_to_cardemu_8822b,
trans_cardemu_to_carddis_8822b,
NULL
};
-static struct rtw_intf_phy_para usb2_param_8822b[] = {
+static const struct rtw_intf_phy_para usb2_param_8822b[] = {
{0xFFFF, 0x00,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para usb3_param_8822b[] = {
+static const struct rtw_intf_phy_para usb3_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_D,
@@ -1935,7 +1935,7 @@ static struct rtw_intf_phy_para usb3_param_8822b[] = {
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
+static const struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
@@ -1982,7 +1982,7 @@ static struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
+static const struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
@@ -2029,7 +2029,7 @@ static struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para_table phy_para_table_8822b = {
+static const struct rtw_intf_phy_para_table phy_para_table_8822b = {
.usb2_para = usb2_param_8822b,
.usb3_para = usb3_param_8822b,
.gen1_para = pcie_gen1_param_8822b,
@@ -2046,12 +2046,12 @@ static const struct rtw_rfe_def rtw8822b_rfe_defs[] = {
[5] = RTW_DEF_RFE(8822b, 5, 5),
};
-static struct rtw_hw_reg rtw8822b_dig[] = {
+static const struct rtw_hw_reg rtw8822b_dig[] = {
[0] = { .addr = 0xc50, .mask = 0x7f },
[1] = { .addr = 0xe50, .mask = 0x7f },
};
-static struct rtw_page_table page_table_8822b[] = {
+static const struct rtw_page_table page_table_8822b[] = {
{64, 64, 64, 64, 1},
{64, 64, 64, 64, 1},
{64, 64, 0, 0, 1},
@@ -2059,7 +2059,7 @@ static struct rtw_page_table page_table_8822b[] = {
{64, 64, 64, 64, 1},
};
-static struct rtw_rqpn rqpn_table_8822b[] = {
+static const struct rtw_rqpn rqpn_table_8822b[] = {
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 3865097696d4..146f693c7592 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -1289,6 +1289,17 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
return 0;
}
+static void rtw8822c_rstb_3wire(struct rtw_dev *rtwdev, bool enable)
+{
+ if (enable) {
+ rtw_write32_mask(rtwdev, REG_RSTB, BIT_RSTB_3WIRE, 0x1);
+ rtw_write32_mask(rtwdev, REG_ANAPAR_A, BIT_ANAPAR_UPDATE, 0x1);
+ rtw_write32_mask(rtwdev, REG_ANAPAR_B, BIT_ANAPAR_UPDATE, 0x1);
+ } else {
+ rtw_write32_mask(rtwdev, REG_RSTB, BIT_RSTB_3WIRE, 0x0);
+ }
+}
+
static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
{
#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8))
@@ -1337,6 +1348,8 @@ static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
break;
}
+ rtw8822c_rstb_3wire(rtwdev, false);
+
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, 0x04, 0x01);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, 0x1f, 0x12);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, 0xfffff, rf_rxbb);
@@ -1349,6 +1362,8 @@ static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_reg18);
rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_reg18);
+
+ rtw8822c_rstb_3wire(rtwdev, true);
}
static void rtw8822c_toggle_igi(struct rtw_dev *rtwdev)
@@ -1482,7 +1497,7 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
break;
case RTW_CHANNEL_WIDTH_40:
rtw_write32_mask(rtwdev, REG_CCKSB, BIT(4),
- (primary_ch_idx == 1 ? 1 : 0));
+ (primary_ch_idx == RTW_SC_20_UPPER ? 1 : 0));
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x5);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xc0, 0x0);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xff00,
@@ -3399,7 +3414,7 @@ static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
-static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -3442,7 +3457,7 @@ static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
{0x0000,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
@@ -3544,6 +3559,11 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x1064,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
{0xFFFF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -3551,7 +3571,7 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
{0x0093,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -3614,7 +3634,7 @@ static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -3677,47 +3697,47 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd *card_enable_flow_8822c[] = {
+static const struct rtw_pwr_seq_cmd *card_enable_flow_8822c[] = {
trans_carddis_to_cardemu_8822c,
trans_cardemu_to_act_8822c,
NULL
};
-static struct rtw_pwr_seq_cmd *card_disable_flow_8822c[] = {
+static const struct rtw_pwr_seq_cmd *card_disable_flow_8822c[] = {
trans_act_to_cardemu_8822c,
trans_cardemu_to_carddis_8822c,
NULL
};
-static struct rtw_intf_phy_para usb2_param_8822c[] = {
+static const struct rtw_intf_phy_para usb2_param_8822c[] = {
{0xFFFF, 0x00,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para usb3_param_8822c[] = {
+static const struct rtw_intf_phy_para usb3_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen1_param_8822c[] = {
+static const struct rtw_intf_phy_para pcie_gen1_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen2_param_8822c[] = {
+static const struct rtw_intf_phy_para pcie_gen2_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para_table phy_para_table_8822c = {
+static const struct rtw_intf_phy_para_table phy_para_table_8822c = {
.usb2_para = usb2_param_8822c,
.usb3_para = usb3_param_8822c,
.gen1_para = pcie_gen1_param_8822c,
@@ -3734,12 +3754,12 @@ static const struct rtw_rfe_def rtw8822c_rfe_defs[] = {
[2] = RTW_DEF_RFE(8822c, 0, 0),
};
-static struct rtw_hw_reg rtw8822c_dig[] = {
+static const struct rtw_hw_reg rtw8822c_dig[] = {
[0] = { .addr = 0x1d70, .mask = 0x7f },
[1] = { .addr = 0x1d70, .mask = 0x7f00 },
};
-static struct rtw_page_table page_table_8822c[] = {
+static const struct rtw_page_table page_table_8822c[] = {
{64, 64, 64, 64, 1},
{64, 64, 64, 64, 1},
{64, 64, 0, 0, 1},
@@ -3747,7 +3767,7 @@ static struct rtw_page_table page_table_8822c[] = {
{64, 64, 64, 64, 1},
};
-static struct rtw_rqpn rqpn_table_8822c[] = {
+static const struct rtw_rqpn rqpn_table_8822c[] = {
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index abd9f300bedd..dfd8662a0c0e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -190,6 +190,8 @@ const struct rtw_table name ## _tbl = { \
#define BIT_3WIRE_TX_EN BIT(0)
#define BIT_3WIRE_RX_EN BIT(1)
#define BIT_3WIRE_PI_ON BIT(28)
+#define REG_ANAPAR_A 0x1830
+#define BIT_ANAPAR_UPDATE BIT(29)
#define REG_RXAGCCTL0 0x18ac
#define BITS_RXAGC_CCK GENMASK(15, 12)
#define BITS_RXAGC_OFDM GENMASK(8, 4)
@@ -223,6 +225,8 @@ const struct rtw_table name ## _tbl = { \
#define BIT_CCK_BLK_EN BIT(1)
#define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1))
#define REG_CCAMSK 0x1c80
+#define REG_RSTB 0x1c90
+#define BIT_RSTB_3WIRE BIT(8)
#define REG_RX_BREAK 0x1d2c
#define BIT_COM_RX_GCK_EN BIT(31)
#define REG_RXFNCTL 0x1d30
@@ -243,6 +247,7 @@ const struct rtw_table name ## _tbl = { \
#define REG_OFDM_TXCNT 0x2de0
#define REG_ORITXCODE2 0x4100
#define REG_3WIRE2 0x410c
+#define REG_ANAPAR_B 0x4130
#define REG_RXAGCCTL 0x41ac
#define REG_DCKB_I_0 0x41bc
#define REG_DCKB_I_1 0x41c0
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 1bebba4e8527..5d6143a55187 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1468,12 +1468,15 @@ static void rsi_shutdown(struct device *dev)
struct rsi_91x_sdiodev *sdev =
(struct rsi_91x_sdiodev *)adapter->rsi_dev;
struct ieee80211_hw *hw = adapter->hw;
- struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
rsi_dbg(ERR_ZONE, "SDIO Bus shutdown =====>\n");
- if (rsi_config_wowlan(adapter, wowlan))
- rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n");
+ if (hw) {
+ struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
+
+ if (rsi_config_wowlan(adapter, wowlan))
+ rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n");
+ }
if (IS_ENABLED(CONFIG_RSI_COEX) && adapter->priv->coex_mode > 1 &&
adapter->priv->bt_adapter) {
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 94569cd695c8..c9a4e9a43400 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -15,9 +15,7 @@
#include <linux/wl12xx.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
-#include <linux/gpio.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include "wl1251.h"
@@ -160,15 +158,6 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
int ret;
if (enable) {
- /*
- * Power is controlled by runtime PM, but we still call board
- * callback in case it wants to do any additional setup,
- * for example enabling clock buffer for the module.
- */
- if (gpio_is_valid(wl->power_gpio))
- gpio_set_value(wl->power_gpio, true);
-
-
ret = pm_runtime_get_sync(&func->dev);
if (ret < 0) {
pm_runtime_put_sync(&func->dev);
@@ -186,9 +175,6 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
ret = pm_runtime_put_sync(&func->dev);
if (ret < 0)
goto out;
-
- if (gpio_is_valid(wl->power_gpio))
- gpio_set_value(wl->power_gpio, false);
}
out:
@@ -241,31 +227,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl1251_board_data = wl1251_get_platform_data();
if (!IS_ERR(wl1251_board_data)) {
- wl->power_gpio = wl1251_board_data->power_gpio;
wl->irq = wl1251_board_data->irq;
wl->use_eeprom = wl1251_board_data->use_eeprom;
} else if (np) {
- wl->use_eeprom = of_property_read_bool(np,
- "ti,wl1251-has-eeprom");
- wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
+ wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
wl->irq = of_irq_get(np, 0);
-
- if (wl->power_gpio == -EPROBE_DEFER ||
- wl->irq == -EPROBE_DEFER) {
+ if (wl->irq == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto disable;
}
}
- if (gpio_is_valid(wl->power_gpio)) {
- ret = devm_gpio_request(&func->dev, wl->power_gpio,
- "wl1251 power");
- if (ret) {
- wl1251_error("Failed to request gpio: %d\n", ret);
- goto disable;
- }
- }
-
if (wl->irq) {
irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ace4dd9e953c..d3f23d6254e4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1078,9 +1078,9 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx)
spin_lock(&nvmeq->cq_poll_lock);
found = nvme_process_cq(nvmeq, &start, &end, -1);
+ nvme_complete_cqes(nvmeq, start, end);
spin_unlock(&nvmeq->cq_poll_lock);
- nvme_complete_cqes(nvmeq, start, end);
return found;
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 8270bbf505fb..9f982c0627a0 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -306,6 +306,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
rc = of_mdiobus_register_phy(mdio, child, addr);
if (rc && rc != -ENODEV)
goto unregister;
+ break;
}
}
}
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index d20aabc26273..3a10e678c7f4 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -670,7 +670,7 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
* outbound memory @ 3GB). So instead it will start at the 1x
* multiple of its size
*/
- if (!*rc_bar2_size || *rc_bar2_offset % *rc_bar2_size ||
+ if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
(*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
*rc_bar2_size, *rc_bar2_offset);
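The bitmask form assumes *rc_bar2_size is a power of two (presumably
guaranteed by how the size is computed earlier in this function); for such
sizes off % size == off & (size - 1), and the AND avoids a 64-bit modulo
helper on 32-bit builds. A minimal check of the equivalence, with
illustrative values:

	u64 off = 0xC0000000ULL, size = SZ_1G;

	/* both expressions evaluate to 0 here: 3 GB is 1 GB-aligned */
	WARN_ON((off % size) != (off & (size - 1)));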
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index fffa77093c08..4f4f54bc732e 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -50,12 +50,7 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
(PCI_STATUS_CAP_LIST | PCI_STATUS_66MHZ |
PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16),
.rsvd = GENMASK(15, 10) | ((BIT(6) | GENMASK(3, 0)) << 16),
- .w1c = (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY) << 16,
+ .w1c = PCI_STATUS_ERROR_BITS << 16,
},
[PCI_CLASS_REVISION / 4] = { .ro = ~0 },
@@ -100,12 +95,7 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
PCI_STATUS_DEVSEL_MASK) << 16) |
GENMASK(11, 8) | GENMASK(3, 0)),
- .w1c = (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY) << 16,
+ .w1c = PCI_STATUS_ERROR_BITS << 16,
.rsvd = ((BIT(6) | GENMASK(4, 0)) << 16),
},
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d828ca835a98..86821313c007 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -173,6 +173,29 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
+/**
+ * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
+ * @pdev: the PCI device
+ *
+ * Returns error bits set in PCI_STATUS and clears them.
+ */
+int pci_status_get_and_clear_errors(struct pci_dev *pdev)
+{
+ u16 status;
+ int ret;
+
+ ret = pci_read_config_word(pdev, PCI_STATUS, &status);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return -EIO;
+
+ status &= PCI_STATUS_ERROR_BITS;
+ if (status)
+ pci_write_config_word(pdev, PCI_STATUS, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
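A hypothetical caller sketch, not taken from the patch (a negative return
means the config read itself failed):

	int err_bits = pci_status_get_and_clear_errors(pdev);

	if (err_bits < 0)
		dev_err(&pdev->dev, "cannot read PCI_STATUS\n");
	else if (err_bits)
		dev_warn(&pdev->dev, "cleared PCI status errors %#06x\n",
			 err_bits);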
+
#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
@@ -557,6 +580,40 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
+/**
+ * pci_get_dsn - Read and return the 8-byte Device Serial Number
+ * @dev: PCI device to query
+ *
+ * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
+ * Number.
+ *
+ * Returns the DSN, or zero if the capability does not exist.
+ */
+u64 pci_get_dsn(struct pci_dev *dev)
+{
+ u32 dword;
+ u64 dsn;
+ int pos;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
+ if (!pos)
+ return 0;
+
+ /*
+ * The Device Serial Number is two dwords offset 4 bytes from the
+ * capability position. The specification says that the first dword is
+ * the lower half, and the second dword is the upper half.
+ */
+ pos += 4;
+ pci_read_config_dword(dev, pos, &dword);
+ dsn = (u64)dword;
+ pci_read_config_dword(dev, pos + 4, &dword);
+ dsn |= ((u64)dword) << 32;
+
+ return dsn;
+}
+EXPORT_SYMBOL_GPL(pci_get_dsn);
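And a hypothetical use of the DSN helper (a zero return means the capability
is absent):

	u64 dsn = pci_get_dsn(pdev);

	if (!dsn)
		dev_dbg(&pdev->dev, "no Device Serial Number capability\n");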
+
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
int rc, ttl = PCI_FIND_CAP_TTL;
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index acce8781c456..f5c7a845cd7b 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -24,8 +24,6 @@ static int arm_pmu_acpi_register_irq(int cpu)
int gsi, trigger;
gicc = acpi_cpu_get_madt_gicc(cpu);
- if (WARN_ON(!gicc))
- return -EINVAL;
gsi = gicc->performance_interrupt;
@@ -64,11 +62,10 @@ static void arm_pmu_acpi_unregister_irq(int cpu)
int gsi;
gicc = acpi_cpu_get_madt_gicc(cpu);
- if (!gicc)
- return;
gsi = gicc->performance_interrupt;
- acpi_unregister_gsi(gsi);
+ if (gsi)
+ acpi_unregister_gsi(gsi);
}
#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 95dca2cb5265..90884d14f95f 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -388,9 +388,10 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
if (enable) {
/*
- * must disable first, then enable again
- * otherwise, cycle counter will not work
- * if previous state is enabled.
+ * The cycle counter is special: clearing it requires writing 0
+ * and then 1 to the CLEAR bit. Other counters only need 0
+ * written to the CLEAR bit, which the hardware then sets back
+ * to 1. The enable flow below is harmless for all counters.
*/
writel(0, pmu->base + reg);
val = CNTL_EN | CNTL_CLEAR;
@@ -398,7 +399,8 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
writel(val, pmu->base + reg);
} else {
/* Disable counter */
- writel(0, pmu->base + reg);
+ val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
+ writel(val, pmu->base + reg);
}
}
diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c
index 1169f3e83a6f..b1c04f71a31d 100644
--- a/drivers/phy/allwinner/phy-sun50i-usb3.c
+++ b/drivers/phy/allwinner/phy-sun50i-usb3.c
@@ -49,7 +49,7 @@
#define SUNXI_LOS_BIAS(n) ((n) << 3)
#define SUNXI_LOS_BIAS_MASK GENMASK(5, 3)
#define SUNXI_TXVBOOSTLVL(n) ((n) << 0)
-#define SUNXI_TXVBOOSTLVL_MASK GENMASK(0, 2)
+#define SUNXI_TXVBOOSTLVL_MASK GENMASK(2, 0)
struct sun50i_usb3_phy {
struct phy *phy;
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 4710cfcc3037..18251f232172 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -186,29 +186,6 @@ enum sata_phy_ctrl_regs {
PHY_CTRL_1_RESET = BIT(0),
};
-static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
-{
- struct brcm_sata_phy *priv = port->phy_priv;
- u32 size = 0;
-
- switch (priv->version) {
- case BRCM_SATA_PHY_STB_16NM:
- case BRCM_SATA_PHY_STB_28NM:
- case BRCM_SATA_PHY_IPROC_NS2:
- case BRCM_SATA_PHY_DSL_28NM:
- size = SATA_PCB_REG_28NM_SPACE_SIZE;
- break;
- case BRCM_SATA_PHY_STB_40NM:
- size = SATA_PCB_REG_40NM_SPACE_SIZE;
- break;
- default:
- dev_err(priv->dev, "invalid phy version\n");
- break;
- }
-
- return priv->phy_base + (port->portnum * size);
-}
-
static inline void __iomem *brcm_sata_ctrl_base(struct brcm_sata_port *port)
{
struct brcm_sata_phy *priv = port->phy_priv;
@@ -226,19 +203,34 @@ static inline void __iomem *brcm_sata_ctrl_base(struct brcm_sata_port *port)
return priv->ctrl_base + (port->portnum * size);
}
-static void brcm_sata_phy_wr(void __iomem *pcb_base, u32 bank,
+static void brcm_sata_phy_wr(struct brcm_sata_port *port, u32 bank,
u32 ofs, u32 msk, u32 value)
{
+ struct brcm_sata_phy *priv = port->phy_priv;
+ void __iomem *pcb_base = priv->phy_base;
u32 tmp;
+ if (priv->version == BRCM_SATA_PHY_STB_40NM)
+ bank += (port->portnum * SATA_PCB_REG_40NM_SPACE_SIZE);
+ else
+ pcb_base += (port->portnum * SATA_PCB_REG_28NM_SPACE_SIZE);
+
writel(bank, pcb_base + SATA_PCB_BANK_OFFSET);
tmp = readl(pcb_base + SATA_PCB_REG_OFFSET(ofs));
tmp = (tmp & msk) | value;
writel(tmp, pcb_base + SATA_PCB_REG_OFFSET(ofs));
}
-static u32 brcm_sata_phy_rd(void __iomem *pcb_base, u32 bank, u32 ofs)
+static u32 brcm_sata_phy_rd(struct brcm_sata_port *port, u32 bank, u32 ofs)
{
+ struct brcm_sata_phy *priv = port->phy_priv;
+ void __iomem *pcb_base = priv->phy_base;
+
+ if (priv->version == BRCM_SATA_PHY_STB_40NM)
+ bank += (port->portnum * SATA_PCB_REG_40NM_SPACE_SIZE);
+ else
+ pcb_base += (port->portnum * SATA_PCB_REG_28NM_SPACE_SIZE);
+
writel(bank, pcb_base + SATA_PCB_BANK_OFFSET);
return readl(pcb_base + SATA_PCB_REG_OFFSET(ofs));
}
@@ -250,16 +242,15 @@ static u32 brcm_sata_phy_rd(void __iomem *pcb_base, u32 bank, u32 ofs)
static void brcm_stb_sata_ssc_init(struct brcm_sata_port *port)
{
- void __iomem *base = brcm_sata_pcb_base(port);
struct brcm_sata_phy *priv = port->phy_priv;
u32 tmp;
/* override the TX spread spectrum setting */
tmp = TXPMD_CONTROL1_TX_SSC_EN_FRC_VAL | TXPMD_CONTROL1_TX_SSC_EN_FRC;
- brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_CONTROL1, ~tmp, tmp);
+ brcm_sata_phy_wr(port, TXPMD_REG_BANK, TXPMD_CONTROL1, ~tmp, tmp);
/* set fixed min freq */
- brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL2,
+ brcm_sata_phy_wr(port, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL2,
~TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK,
STB_FMIN_VAL_DEFAULT);
@@ -271,7 +262,7 @@ static void brcm_stb_sata_ssc_init(struct brcm_sata_port *port)
tmp = STB_FMAX_VAL_DEFAULT;
}
- brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL3,
+ brcm_sata_phy_wr(port, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL3,
~TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK, tmp);
}
@@ -280,7 +271,6 @@ static void brcm_stb_sata_ssc_init(struct brcm_sata_port *port)
static int brcm_stb_sata_rxaeq_init(struct brcm_sata_port *port)
{
- void __iomem *base = brcm_sata_pcb_base(port);
u32 tmp = 0, reg = 0;
switch (port->rxaeq_mode) {
@@ -301,8 +291,8 @@ static int brcm_stb_sata_rxaeq_init(struct brcm_sata_port *port)
break;
}
- brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, reg, ~tmp, tmp);
- brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, reg, ~tmp, tmp);
+ brcm_sata_phy_wr(port, AEQRX_REG_BANK_0, reg, ~tmp, tmp);
+ brcm_sata_phy_wr(port, AEQRX_REG_BANK_1, reg, ~tmp, tmp);
return 0;
}
@@ -316,18 +306,17 @@ static int brcm_stb_sata_init(struct brcm_sata_port *port)
static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
{
- void __iomem *base = brcm_sata_pcb_base(port);
u32 tmp, value;
/* Reduce CP tail current to 1/16th of its default value */
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0x141);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0x141);
/* Turn off CP tail current boost */
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL8, 0, 0xc006);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL8, 0, 0xc006);
/* Set a specific AEQ equalizer value */
tmp = AEQ_FRC_EQ_FORCE_VAL | AEQ_FRC_EQ_FORCE;
- brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, AEQ_FRC_EQ,
+ brcm_sata_phy_wr(port, AEQRX_REG_BANK_0, AEQ_FRC_EQ,
~(tmp | AEQ_RFZ_FRC_VAL |
AEQ_FRC_EQ_VAL_MASK << AEQ_FRC_EQ_VAL_SHIFT),
tmp | 32 << AEQ_FRC_EQ_VAL_SHIFT);
@@ -337,7 +326,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
value = 0x52;
else
value = 0;
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CONTROL1,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CONTROL1,
~RXPMD_RX_PPM_VAL_MASK, value);
/* Set proportional loop bandwith Gen1/2/3 */
@@ -352,7 +341,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
value = 1 << RXPMD_G1_CDR_PROP_BW_SHIFT |
1 << RXPMD_G2_CDR_PROP_BW_SHIFT |
1 << RXPMD_G3_CDR_PROB_BW_SHIFT;
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_PROP_BW, ~tmp,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_PROP_BW, ~tmp,
value);
/* Set CDR integral loop acquisition bandwidth for Gen1/2/3 */
@@ -365,7 +354,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
1 << RXPMD_G3_CDR_ACQ_INT_BW_SHIFT;
else
value = 0;
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_ACQ_INTEG_BW,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_ACQ_INTEG_BW,
~tmp, value);
/* Set CDR integral loop locking bandwidth to 1 for Gen 1/2/3 */
@@ -378,7 +367,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
1 << RXPMD_G3_CDR_LOCK_INT_BW_SHIFT;
else
value = 0;
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_LOCK_INTEG_BW,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_LOCK_INTEG_BW,
~tmp, value);
/* Set no guard band and clamp CDR */
@@ -387,11 +376,11 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
value = 0x51;
else
value = 0;
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
~tmp, RXPMD_MON_CORRECT_EN | value);
/* Turn on/off SSC */
- brcm_sata_phy_wr(base, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN,
+ brcm_sata_phy_wr(port, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN,
port->ssc_en ? TX_ACTRL5_SSC_EN : 0);
return 0;
@@ -411,7 +400,6 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port)
{
int try;
unsigned int val;
- void __iomem *base = brcm_sata_pcb_base(port);
void __iomem *ctrl_base = brcm_sata_ctrl_base(port);
struct device *dev = port->phy_priv->dev;
@@ -421,24 +409,24 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port)
val |= (0x4 << OOB_CTRL1_BURST_MIN_SHIFT);
val |= (0x9 << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT);
val |= (0x3 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT);
- brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL1, 0x0, val);
+ brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL1, 0x0, val);
val = 0x0;
val |= (0x1b << OOB_CTRL2_RESET_IDLE_MAX_SHIFT);
val |= (0x2 << OOB_CTRL2_BURST_CNT_SHIFT);
val |= (0x9 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT);
- brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL2, 0x0, val);
+ brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL2, 0x0, val);
/* Configure PHY PLL register bank 1 */
val = NS2_PLL1_ACTRL2_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val);
val = NS2_PLL1_ACTRL3_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val);
val = NS2_PLL1_ACTRL4_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val);
/* Configure PHY BLOCK0 register bank */
/* Set oob_clk_sel to refclk/2 */
- brcm_sata_phy_wr(base, BLOCK0_REG_BANK, BLOCK0_SPARE,
+ brcm_sata_phy_wr(port, BLOCK0_REG_BANK, BLOCK0_SPARE,
~BLOCK0_SPARE_OOB_CLK_SEL_MASK,
BLOCK0_SPARE_OOB_CLK_SEL_REFBY2);
@@ -451,7 +439,7 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port)
/* Wait for PHY PLL lock by polling pll_lock bit */
try = 50;
while (try) {
- val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ val = brcm_sata_phy_rd(port, BLOCK0_REG_BANK,
BLOCK0_XGXSSTATUS);
if (val & BLOCK0_XGXSSTATUS_PLL_LOCK)
break;
@@ -471,9 +459,7 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port)
static int brcm_nsp_sata_init(struct brcm_sata_port *port)
{
- struct brcm_sata_phy *priv = port->phy_priv;
struct device *dev = port->phy_priv->dev;
- void __iomem *base = priv->phy_base;
unsigned int oob_bank;
unsigned int val, try;
@@ -490,36 +476,36 @@ static int brcm_nsp_sata_init(struct brcm_sata_port *port)
val |= (0x06 << OOB_CTRL1_BURST_MIN_SHIFT);
val |= (0x0f << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT);
val |= (0x06 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT);
- brcm_sata_phy_wr(base, oob_bank, OOB_CTRL1, 0x0, val);
+ brcm_sata_phy_wr(port, oob_bank, OOB_CTRL1, 0x0, val);
val = 0x0;
val |= (0x2e << OOB_CTRL2_RESET_IDLE_MAX_SHIFT);
val |= (0x02 << OOB_CTRL2_BURST_CNT_SHIFT);
val |= (0x16 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT);
- brcm_sata_phy_wr(base, oob_bank, OOB_CTRL2, 0x0, val);
+ brcm_sata_phy_wr(port, oob_bank, OOB_CTRL2, 0x0, val);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_ACTRL2,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_ACTRL2,
~(PLL_ACTRL2_SELDIV_MASK << PLL_ACTRL2_SELDIV_SHIFT),
0x0c << PLL_ACTRL2_SELDIV_SHIFT);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_CAP_CONTROL,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_CAP_CONTROL,
0xff0, 0x4f0);
val = PLLCONTROL_0_FREQ_DET_RESTART | PLLCONTROL_0_FREQ_MONITOR;
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
~val, val);
val = PLLCONTROL_0_SEQ_START;
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
~val, 0);
mdelay(10);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
~val, val);
/* Wait for pll_seq_done bit */
try = 50;
while (--try) {
- val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ val = brcm_sata_phy_rd(port, BLOCK0_REG_BANK,
BLOCK0_XGXSSTATUS);
if (val & BLOCK0_XGXSSTATUS_PLL_LOCK)
break;
@@ -546,27 +532,25 @@ static int brcm_nsp_sata_init(struct brcm_sata_port *port)
static int brcm_sr_sata_init(struct brcm_sata_port *port)
{
- struct brcm_sata_phy *priv = port->phy_priv;
struct device *dev = port->phy_priv->dev;
- void __iomem *base = priv->phy_base;
unsigned int val, try;
/* Configure PHY PLL register bank 1 */
val = SR_PLL1_ACTRL2_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val);
val = SR_PLL1_ACTRL3_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val);
val = SR_PLL1_ACTRL4_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val);
/* Configure PHY PLL register bank 0 */
val = SR_PLL0_ACTRL6_MAGIC;
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_ACTRL6, 0x0, val);
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_ACTRL6, 0x0, val);
/* Wait for PHY PLL lock by polling pll_lock bit */
try = 50;
do {
- val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ val = brcm_sata_phy_rd(port, BLOCK0_REG_BANK,
BLOCK0_XGXSSTATUS);
if (val & BLOCK0_XGXSSTATUS_PLL_LOCK)
break;
@@ -581,7 +565,7 @@ static int brcm_sr_sata_init(struct brcm_sata_port *port)
}
/* Invert Tx polarity */
- brcm_sata_phy_wr(base, TX_REG_BANK, TX_ACTRL0,
+ brcm_sata_phy_wr(port, TX_REG_BANK, TX_ACTRL0,
~TX_ACTRL0_TXPOL_FLIP, TX_ACTRL0_TXPOL_FLIP);
/* Configure OOB control to handle 100MHz reference clock */
@@ -589,52 +573,51 @@ static int brcm_sr_sata_init(struct brcm_sata_port *port)
(0x4 << OOB_CTRL1_BURST_MIN_SHIFT) |
(0x8 << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT) |
(0x3 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT));
- brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL1, 0x0, val);
+ brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL1, 0x0, val);
val = ((0x1b << OOB_CTRL2_RESET_IDLE_MAX_SHIFT) |
(0x2 << OOB_CTRL2_BURST_CNT_SHIFT) |
(0x9 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT));
- brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL2, 0x0, val);
+ brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL2, 0x0, val);
return 0;
}
static int brcm_dsl_sata_init(struct brcm_sata_port *port)
{
- void __iomem *base = brcm_sata_pcb_base(port);
struct device *dev = port->phy_priv->dev;
unsigned int try;
u32 tmp;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL7, 0, 0x873);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL7, 0, 0x873);
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0xc000);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0xc000);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
0, 0x3089);
usleep_range(1000, 2000);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
0, 0x3088);
usleep_range(1000, 2000);
- brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, AEQRX_SLCAL0_CTRL0,
+ brcm_sata_phy_wr(port, AEQRX_REG_BANK_1, AEQRX_SLCAL0_CTRL0,
0, 0x3000);
- brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, AEQRX_SLCAL1_CTRL0,
+ brcm_sata_phy_wr(port, AEQRX_REG_BANK_1, AEQRX_SLCAL1_CTRL0,
0, 0x3000);
usleep_range(1000, 2000);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_CAP_CHARGE_TIME, 0, 0x32);
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_CAP_CHARGE_TIME, 0, 0x32);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_VCO_CAL_THRESH, 0, 0xa);
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_VCO_CAL_THRESH, 0, 0xa);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_FREQ_DET_TIME, 0, 0x64);
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_FREQ_DET_TIME, 0, 0x64);
usleep_range(1000, 2000);
/* Acquire PLL lock */
try = 50;
while (try) {
- tmp = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ tmp = brcm_sata_phy_rd(port, BLOCK0_REG_BANK,
BLOCK0_XGXSSTATUS);
if (tmp & BLOCK0_XGXSSTATUS_PLL_LOCK)
break;
@@ -687,10 +670,9 @@ static int brcm_sata_phy_init(struct phy *phy)
static void brcm_stb_sata_calibrate(struct brcm_sata_port *port)
{
- void __iomem *base = brcm_sata_pcb_base(port);
u32 tmp = BIT(8);
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
~tmp, tmp);
}
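
The hunks above complete one mechanical substitution: the register accessors now take the port itself and resolve the PCB window per call, so callers can no longer pass a stale or wrong base on multi-port PHYs. A minimal sketch of the resulting helper shape; the bank-select offset name and the read-modify-write body are assumptions, not copied from the driver:

static void brcm_sata_phy_wr(struct brcm_sata_port *port, u32 bank,
			     u32 ofs, u32 msk, u32 value)
{
	void __iomem *base = brcm_sata_pcb_base(port);	/* per-call lookup */
	u32 tmp;

	writel(bank, base + SATA_PCB_BANK_OFFSET);	/* hypothetical offset name */
	tmp = readl(base + ofs);
	writel((tmp & msk) | value, base + ofs);
}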
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index f20524f0c21d..94a34cf75eb3 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -20,6 +20,7 @@
#define PHY_MDM6600_PHY_DELAY_MS 4000 /* PHY enable 2.2s to 3.5s */
#define PHY_MDM6600_ENABLED_DELAY_MS 8000 /* 8s more total for MDM6600 */
+#define PHY_MDM6600_WAKE_KICK_MS 600 /* time on after GPIO toggle */
#define MDM6600_MODEM_IDLE_DELAY_MS 1000 /* modem after USB suspend */
#define MDM6600_MODEM_WAKE_DELAY_MS 200 /* modem response after idle */
@@ -243,10 +244,24 @@ static irqreturn_t phy_mdm6600_wakeirq_thread(int irq, void *data)
{
struct phy_mdm6600 *ddata = data;
struct gpio_desc *mode_gpio1;
+ int error, wakeup;
mode_gpio1 = ddata->mode_gpios->desc[PHY_MDM6600_MODE1];
- dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n",
- gpiod_get_value(mode_gpio1));
+ wakeup = gpiod_get_value(mode_gpio1);
+ if (!wakeup)
+ return IRQ_NONE;
+
+ dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n", wakeup);
+ error = pm_runtime_get_sync(ddata->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+
+ return IRQ_NONE;
+ }
+
+ /* Just wake up and kick the autosuspend timer */
+ pm_runtime_mark_last_busy(ddata->dev);
+ pm_runtime_put_autosuspend(ddata->dev);
return IRQ_HANDLED;
}
@@ -496,8 +511,14 @@ static void phy_mdm6600_modem_wake(struct work_struct *work)
ddata = container_of(work, struct phy_mdm6600, modem_wake_work.work);
phy_mdm6600_wake_modem(ddata);
+
+ /*
+ * The modem does not always stay awake 1.2 seconds after toggling
+ * the wake GPIO, and sometimes it idles after about 600 ms
+ * making writes time out.
+ */
schedule_delayed_work(&ddata->modem_wake_work,
- msecs_to_jiffies(MDM6600_MODEM_IDLE_DELAY_MS));
+ msecs_to_jiffies(PHY_MDM6600_WAKE_KICK_MS));
}
static int __maybe_unused phy_mdm6600_runtime_suspend(struct device *dev)
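
The wakeirq handler above is an instance of a common runtime-PM pattern: a threaded IRQ wakes the device, then immediately hands it back to the autosuspend machinery. A generic sketch of that pattern, with the device pointer and handler name illustrative:

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static irqreturn_t wake_irq_thread(int irq, void *data)
{
	struct device *dev = data;

	if (pm_runtime_get_sync(dev) < 0) {
		/* balance the usage count even though resume failed */
		pm_runtime_put_noidle(dev);
		return IRQ_NONE;
	}

	/* restart the autosuspend timer, then release our reference */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return IRQ_HANDLED;
}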
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index cd5a6c95dbdc..a27b8d578d7f 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -688,11 +688,9 @@ struct phy *phy_get(struct device *dev, const char *string)
get_device(&phy->dev);
link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
- if (!link) {
- dev_err(dev, "failed to create device link to %s\n",
+ if (!link)
+ dev_dbg(dev, "failed to create device link to %s\n",
dev_name(phy->dev.parent));
- return ERR_PTR(-EINVAL);
- }
return phy;
}
@@ -803,11 +801,9 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
}
link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
- if (!link) {
- dev_err(dev, "failed to create device link to %s\n",
+ if (!link)
+ dev_dbg(dev, "failed to create device link to %s\n",
dev_name(phy->dev.parent));
- return ERR_PTR(-EINVAL);
- }
return phy;
}
@@ -852,11 +848,9 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
devres_add(dev, ptr);
link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
- if (!link) {
- dev_err(dev, "failed to create device link to %s\n",
+ if (!link)
+ dev_dbg(dev, "failed to create device link to %s\n",
dev_name(phy->dev.parent));
- return ERR_PTR(-EINVAL);
- }
return phy;
}
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index a28bd15297f5..1c536fc03c83 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -80,20 +80,20 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
break;
case PHY_INTERFACE_MODE_MII:
- mode = AM33XX_GMII_SEL_MODE_MII;
+ case PHY_INTERFACE_MODE_GMII:
+ gmii_sel_mode = AM33XX_GMII_SEL_MODE_MII;
break;
default:
- dev_warn(dev,
- "port%u: unsupported mode: \"%s\". Defaulting to MII.\n",
- if_phy->id, phy_modes(rgmii_id));
+ dev_warn(dev, "port%u: unsupported mode: \"%s\"\n",
+ if_phy->id, phy_modes(submode));
return -EINVAL;
}
if_phy->phy_if_mode = submode;
dev_dbg(dev, "%s id:%u mode:%u rgmii_id:%d rmii_clk_ext:%d\n",
- __func__, if_phy->id, mode, rgmii_id,
+ __func__, if_phy->id, submode, rgmii_id,
if_phy->rmii_clock_external);
regfield = if_phy->fields[PHY_GMII_SEL_PORT_MODE];
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index 7b6409ef553c..dce2626384a9 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -1073,13 +1073,26 @@ static int madera_pin_probe(struct platform_device *pdev)
return ret;
}
+ platform_set_drvdata(pdev, priv);
+
dev_dbg(priv->dev, "pinctrl probed ok\n");
return 0;
}
+static int madera_pin_remove(struct platform_device *pdev)
+{
+ struct madera_pin_private *priv = platform_get_drvdata(pdev);
+
+ if (priv->madera->pdata.gpio_configs)
+ pinctrl_unregister_mappings(priv->madera->pdata.gpio_configs);
+
+ return 0;
+}
+
static struct platform_driver madera_pin_driver = {
.probe = madera_pin_probe,
+ .remove = madera_pin_remove,
.driver = {
.name = "madera-pinctrl",
},
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 446d84fe0e31..f23c55e22195 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2021,7 +2021,6 @@ static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
return PTR_ERR(pctldev->p);
}
- kref_get(&pctldev->p->users);
pctldev->hog_default =
pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
if (IS_ERR(pctldev->hog_default)) {
diff --git a/drivers/pinctrl/freescale/pinctrl-scu.c b/drivers/pinctrl/freescale/pinctrl-scu.c
index 73bf1d9f9cc6..23cf04bdfc55 100644
--- a/drivers/pinctrl/freescale/pinctrl-scu.c
+++ b/drivers/pinctrl/freescale/pinctrl-scu.c
@@ -23,12 +23,12 @@ struct imx_sc_msg_req_pad_set {
struct imx_sc_rpc_msg hdr;
u32 val;
u16 pad;
-} __packed;
+} __packed __aligned(4);
struct imx_sc_msg_req_pad_get {
struct imx_sc_rpc_msg hdr;
u16 pad;
-} __packed;
+} __packed __aligned(4);
struct imx_sc_msg_resp_pad_get {
struct imx_sc_rpc_msg hdr;
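
This fix and the identical soc-imx-scu change further down exist because __packed alone drops the struct's natural alignment to 1, which breaks the SCU mailbox code's word-sized copies; __aligned(4) restores the alignment guarantee without reintroducing padding between fields. A reduced illustration with stand-in fields:

struct scu_msg {
	u32 hdr;	/* stands in for struct imx_sc_rpc_msg (4 bytes) */
	u16 pad;
} __packed __aligned(4);

/*
 * Field offsets stay packed (pad at offset 4) and alignof() is back to
 * 4, so the mailbox code may copy the message as u32 words; the tail is
 * padded out to sizeof == 8, which a length-in-words protocol tolerates.
 */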
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 1b6e8646700f..2ac921c83da9 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -147,8 +147,8 @@ static const unsigned int sdio_d0_pins[] = { GPIOX_0 };
static const unsigned int sdio_d1_pins[] = { GPIOX_1 };
static const unsigned int sdio_d2_pins[] = { GPIOX_2 };
static const unsigned int sdio_d3_pins[] = { GPIOX_3 };
-static const unsigned int sdio_cmd_pins[] = { GPIOX_4 };
-static const unsigned int sdio_clk_pins[] = { GPIOX_5 };
+static const unsigned int sdio_clk_pins[] = { GPIOX_4 };
+static const unsigned int sdio_cmd_pins[] = { GPIOX_5 };
static const unsigned int sdio_irq_pins[] = { GPIOX_7 };
static const unsigned int nand_ce0_pins[] = { BOOT_8 };
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index a454f57c264e..62c02b969327 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -451,7 +451,7 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL);
if (IS_ERR(falcon_info.clk[*bank])) {
dev_err(&ppdev->dev, "failed to get clock\n");
- of_node_put(np)
+ of_node_put(np);
return PTR_ERR(falcon_info.clk[*bank]);
}
falcon_info.membase[*bank] = devm_ioremap_resource(&pdev->dev,
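
The missing semicolon above masked the rule actually being applied: OF node iterators hold a reference on the node they hand out, so every early return inside the loop must drop it. A generic sketch; parent, setup_one() and the error leg are illustrative:

struct device_node *child;
int err;

for_each_child_of_node(parent, child) {
	err = setup_one(child);		/* hypothetical per-node setup */
	if (err) {
		of_node_put(child);	/* the iterator took this reference */
		return err;
	}
}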
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 9a8daa256a32..1a948c3f54b7 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1104,7 +1104,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
- pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
@@ -1118,7 +1117,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
if (!chip->irq.parent_domain)
return -EPROBE_DEFER;
chip->irq.child_to_parent_hwirq = msm_gpio_wakeirq;
-
+ pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
/*
* Let's skip handling the GPIOs, if the parent irqchip
* is handling the direct connect IRQ of the GPIO.
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index fba1d41d20ec..338a15d08629 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -794,7 +794,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
- girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
+ girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_twocell;
girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 3b424ffceb83..86400c708150 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -139,4 +139,16 @@ config PTP_1588_CLOCK_IDTCM
To compile this driver as a module, choose M here: the module
will be called ptp_clockmatrix.
+config PTP_1588_CLOCK_VMW
+ tristate "VMware virtual PTP clock"
+ depends on ACPI && HYPERVISOR_GUEST && X86
+ depends on PTP_1588_CLOCK
+ help
+ This driver adds support for using the VMware virtual precision
+ clock device as a PTP clock. This is only useful in virtual
+ machines running on VMware virtual infrastructure.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_vmw.
+
endmenu
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 01ff7fc3e820..7aff75f745dc 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -14,3 +14,4 @@ ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
obj-$(CONFIG_PTP_1588_CLOCK_IDT82P33) += ptp_idt82p33.o
+obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o
diff --git a/drivers/ptp/ptp_vmw.c b/drivers/ptp/ptp_vmw.c
new file mode 100644
index 000000000000..5dca26e14bdc
--- /dev/null
+++ b/drivers/ptp/ptp_vmw.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright (C) 2020 VMware, Inc., Palo Alto, CA., USA
+ *
+ * PTP clock driver for VMware precision clock virtual device.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <asm/hypervisor.h>
+#include <asm/vmware.h>
+
+#define VMWARE_MAGIC 0x564D5868
+#define VMWARE_CMD_PCLK(nr) (((nr) << 16) | 97)
+#define VMWARE_CMD_PCLK_GETTIME VMWARE_CMD_PCLK(0)
+
+static struct acpi_device *ptp_vmw_acpi_device;
+static struct ptp_clock *ptp_vmw_clock;
+
+static int ptp_vmw_pclk_read(u64 *ns)
+{
+ u32 ret, nsec_hi, nsec_lo, unused1, unused2, unused3;
+
+ asm volatile (VMWARE_HYPERCALL :
+ "=a"(ret), "=b"(nsec_hi), "=c"(nsec_lo), "=d"(unused1),
+ "=S"(unused2), "=D"(unused3) :
+ "a"(VMWARE_MAGIC), "b"(0),
+ "c"(VMWARE_CMD_PCLK_GETTIME), "d"(0) :
+ "memory");
+
+ if (ret == 0)
+ *ns = ((u64)nsec_hi << 32) | nsec_lo;
+ return ret;
+}
+
+/*
+ * PTP clock ops.
+ */
+
+static int ptp_vmw_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_adjfreq(struct ptp_clock_info *info, s32 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+ u64 ns;
+
+ if (ptp_vmw_pclk_read(&ns) != 0)
+ return -EIO;
+ *ts = ns_to_timespec64(ns);
+ return 0;
+}
+
+static int ptp_vmw_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_vmw_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "ptp_vmw",
+ .max_adj = 0,
+ .adjtime = ptp_vmw_adjtime,
+ .adjfreq = ptp_vmw_adjfreq,
+ .gettime64 = ptp_vmw_gettime,
+ .settime64 = ptp_vmw_settime,
+ .enable = ptp_vmw_enable,
+};
+
+/*
+ * ACPI driver ops for VMware "precision clock" virtual device.
+ */
+
+static int ptp_vmw_acpi_add(struct acpi_device *device)
+{
+ ptp_vmw_clock = ptp_clock_register(&ptp_vmw_clock_info, NULL);
+ if (IS_ERR(ptp_vmw_clock)) {
+ pr_err("failed to register ptp clock\n");
+ return PTR_ERR(ptp_vmw_clock);
+ }
+
+ ptp_vmw_acpi_device = device;
+ return 0;
+}
+
+static int ptp_vmw_acpi_remove(struct acpi_device *device)
+{
+ ptp_clock_unregister(ptp_vmw_clock);
+ return 0;
+}
+
+static const struct acpi_device_id ptp_vmw_acpi_device_ids[] = {
+ { "VMW0005", 0 },
+ { "", 0 },
+};
+
+MODULE_DEVICE_TABLE(acpi, ptp_vmw_acpi_device_ids);
+
+static struct acpi_driver ptp_vmw_acpi_driver = {
+ .name = "ptp_vmw",
+ .ids = ptp_vmw_acpi_device_ids,
+ .ops = {
+ .add = ptp_vmw_acpi_add,
+ .remove = ptp_vmw_acpi_remove
+ },
+ .owner = THIS_MODULE
+};
+
+static int __init ptp_vmw_init(void)
+{
+ if (x86_hyper_type != X86_HYPER_VMWARE)
+ return -1;
+ return acpi_bus_register_driver(&ptp_vmw_acpi_driver);
+}
+
+static void __exit ptp_vmw_exit(void)
+{
+ acpi_bus_unregister_driver(&ptp_vmw_acpi_driver);
+}
+
+module_init(ptp_vmw_init);
+module_exit(ptp_vmw_exit);
+
+MODULE_DESCRIPTION("VMware virtual PTP clock driver");
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_LICENSE("Dual BSD/GPL");
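
For context on how the new clock is consumed: once ptp_clock_register() succeeds, the device shows up as a dynamic POSIX clock behind a character device, so userspace reads it with clock_gettime(). A minimal sketch, assuming the clock registered as /dev/ptp0 (the FD_TO_CLOCKID encoding is the standard dynamic-clock convention):

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD			3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}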
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index bdfaf7edb75a..992bc18101ef 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -88,7 +88,7 @@ static int stm32_vrefbuf_disable(struct regulator_dev *rdev)
}
val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
- val = (val & ~STM32_ENVR) | STM32_HIZ;
+ val &= ~STM32_ENVR;
writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
pm_runtime_mark_last_busy(priv->dev);
@@ -175,6 +175,7 @@ static const struct regulator_desc stm32_vrefbuf_regu = {
.volt_table = stm32_vrefbuf_voltages,
.n_voltages = ARRAY_SIZE(stm32_vrefbuf_voltages),
.ops = &stm32_vrefbuf_volt_ops,
+ .off_on_delay = 1000,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
};
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index de3862c15fcc..56084635dd63 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -167,6 +167,12 @@ config QCOM_Q6V5_WCSS
Say y here to support the Qualcomm Peripheral Image Loader for the
Hexagon V5 based WCSS remote processors.
+config QCOM_Q6V5_IPA_NOTIFY
+ tristate
+ depends on QCOM_IPA
+ depends on QCOM_Q6V5_MSS
+ default QCOM_IPA
+
config QCOM_SYSMON
tristate "Qualcomm sysmon driver"
depends on RPMSG
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index e30a1b15fbac..0effd3825035 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o
obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o
obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o
obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o
+obj-$(CONFIG_QCOM_Q6V5_IPA_NOTIFY) += qcom_q6v5_ipa_notify.o
obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
diff --git a/drivers/remoteproc/qcom_q6v5_ipa_notify.c b/drivers/remoteproc/qcom_q6v5_ipa_notify.c
new file mode 100644
index 000000000000..e1c10a128bfd
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_ipa_notify.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Qualcomm IPA notification subdev support
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
+
+static void
+ipa_notify_common(struct rproc_subdev *subdev, enum qcom_rproc_event event)
+{
+ struct qcom_rproc_ipa_notify *ipa_notify;
+ qcom_ipa_notify_t notify;
+
+ ipa_notify = container_of(subdev, struct qcom_rproc_ipa_notify, subdev);
+ notify = ipa_notify->notify;
+ if (notify)
+ notify(ipa_notify->data, event);
+}
+
+static int ipa_notify_prepare(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_STARTING);
+
+ return 0;
+}
+
+static int ipa_notify_start(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_RUNNING);
+
+ return 0;
+}
+
+static void ipa_notify_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ ipa_notify_common(subdev, crashed ? MODEM_CRASHED : MODEM_STOPPING);
+}
+
+static void ipa_notify_unprepare(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_OFFLINE);
+}
+
+static void ipa_notify_removing(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_REMOVING);
+}
+
+/* Register the IPA notification subdevice with the Q6V5 MSS remoteproc */
+void qcom_add_ipa_notify_subdev(struct rproc *rproc,
+ struct qcom_rproc_ipa_notify *ipa_notify)
+{
+ ipa_notify->notify = NULL;
+ ipa_notify->data = NULL;
+ ipa_notify->subdev.prepare = ipa_notify_prepare;
+ ipa_notify->subdev.start = ipa_notify_start;
+ ipa_notify->subdev.stop = ipa_notify_stop;
+ ipa_notify->subdev.unprepare = ipa_notify_unprepare;
+
+ rproc_add_subdev(rproc, &ipa_notify->subdev);
+}
+EXPORT_SYMBOL_GPL(qcom_add_ipa_notify_subdev);
+
+/* Remove the IPA notification subdevice */
+void qcom_remove_ipa_notify_subdev(struct rproc *rproc,
+ struct qcom_rproc_ipa_notify *ipa_notify)
+{
+ struct rproc_subdev *subdev = &ipa_notify->subdev;
+
+ ipa_notify_removing(subdev);
+
+ rproc_remove_subdev(rproc, subdev);
+ ipa_notify->notify = NULL; /* Make it obvious */
+}
+EXPORT_SYMBOL_GPL(qcom_remove_ipa_notify_subdev);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm IPA notification remoteproc subdev");
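
The subdev carries exactly one callback, so the intended consumer (the IPA driver, per the Kconfig dependency above) registers against the MSS remoteproc and demultiplexes the lifecycle events itself. A hedged usage sketch; my_ipa_notify() and the hookup function are illustrative, while the registration call matches the export added to qcom_q6v5_mss.c below:

static void my_ipa_notify(void *data, enum qcom_rproc_event event)
{
	switch (event) {
	case MODEM_RUNNING:
		/* modem is up: bring the IPA<->modem path online */
		break;
	case MODEM_STOPPING:
	case MODEM_CRASHED:
		/* quiesce traffic before the modem goes away */
		break;
	default:
		break;
	}
}

static int my_ipa_hookup(struct rproc *mss_rproc, void *my_ipa)
{
	/* returns -EINVAL for a NULL callback, -EBUSY if already set */
	return qcom_register_ipa_notify(mss_rproc, my_ipa_notify, my_ipa);
}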
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index a1cc9cbe038f..f9ccce76e44b 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -22,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
+#include "linux/remoteproc/qcom_q6v5_ipa_notify.h"
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>
@@ -201,6 +202,7 @@ struct q6v5 {
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
+ struct qcom_rproc_ipa_notify ipa_notify_subdev;
struct qcom_sysmon *sysmon;
bool need_mem_protection;
bool has_alt_reset;
@@ -1540,6 +1542,39 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
return 0;
}
+#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
+
+/* Register IPA notification function */
+int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
+ void *data)
+{
+ struct qcom_rproc_ipa_notify *ipa_notify;
+ struct q6v5 *qproc = rproc->priv;
+
+ if (!notify)
+ return -EINVAL;
+
+ ipa_notify = &qproc->ipa_notify_subdev;
+ if (ipa_notify->notify)
+ return -EBUSY;
+
+ ipa_notify->notify = notify;
+ ipa_notify->data = data;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);
+
+/* Deregister IPA notification function */
+void qcom_deregister_ipa_notify(struct rproc *rproc)
+{
+ struct q6v5 *qproc = rproc->priv;
+
+ qproc->ipa_notify_subdev.notify = NULL;
+}
+EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
+#endif /* IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
+
static int q6v5_probe(struct platform_device *pdev)
{
const struct rproc_hexagon_res *desc;
@@ -1664,6 +1699,7 @@ static int q6v5_probe(struct platform_device *pdev)
qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
+ qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
if (IS_ERR(qproc->sysmon)) {
ret = PTR_ERR(qproc->sysmon);
@@ -1677,6 +1713,7 @@ static int q6v5_probe(struct platform_device *pdev)
return 0;
detach_proxy_pds:
+ qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
detach_active_pds:
q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
@@ -1693,6 +1730,7 @@ static int q6v5_remove(struct platform_device *pdev)
rproc_del(qproc->rproc);
qcom_remove_sysmon_subdev(qproc->sysmon);
+ qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 461b0e506a26..d9efbfd29646 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -51,6 +51,7 @@ config RESET_BRCMSTB
config RESET_BRCMSTB_RESCAL
bool "Broadcom STB RESCAL reset controller"
+ depends on HAS_IOMEM
default ARCH_BRCMSTB || COMPILE_TEST
help
This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on
@@ -73,7 +74,7 @@ config RESET_IMX7
config RESET_INTEL_GW
bool "Intel Reset Controller Driver"
- depends on OF
+ depends on OF && HAS_IOMEM
select REGMAP_MMIO
help
This enables the reset controller driver for Intel Gateway SoCs.
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index e115623b86b2..66e4bdca9d89 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -224,8 +224,15 @@ static void setup_queues(struct qdio_irq *irq_ptr,
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
- q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
- qdio_init->queue_start_poll_array[i] : NULL;
+ if (qdio_init->queue_start_poll_array &&
+ qdio_init->queue_start_poll_array[i]) {
+ q->u.in.queue_start_poll =
+ qdio_init->queue_start_poll_array[i];
+ set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state);
+ } else {
+ q->u.in.queue_start_poll = NULL;
+ }
setup_storage_lists(q, irq_ptr, input_sbal_array, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b7d64690ea38..962be94ed3ca 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -368,7 +368,7 @@ enum qeth_qdio_info_states {
struct qeth_buffer_pool_entry {
struct list_head list;
struct list_head init_list;
- void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
};
struct qeth_qdio_buffer_pool {
@@ -985,7 +985,7 @@ extern const struct attribute_group qeth_device_blkt_group;
extern const struct device_type qeth_generic_devtype;
const char *qeth_get_cardname_short(struct qeth_card *);
-int qeth_realloc_buffer_pool(struct qeth_card *, int);
+int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
void qeth_core_free_discipline(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index fdc50543ce9a..6caa78d51bd1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -65,7 +65,6 @@ static struct lock_class_key qdio_out_skb_queue_key;
static void qeth_issue_next_read_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length);
-static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
@@ -212,49 +211,121 @@ void qeth_clear_working_pool_list(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
+static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
+ if (entry->elements[i])
+ __free_page(entry->elements[i]);
+ }
+
+ kfree(entry);
+}
+
+static void qeth_free_buffer_pool(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
+ init_list) {
+ list_del(&entry->init_list);
+ qeth_free_pool_entry(entry);
+ }
+}
+
+static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
+{
+ struct qeth_buffer_pool_entry *entry;
+ unsigned int i;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ for (i = 0; i < pages; i++) {
+ entry->elements[i] = alloc_page(GFP_KERNEL);
+
+ if (!entry->elements[i]) {
+ qeth_free_pool_entry(entry);
+ return NULL;
+ }
+ }
+
+ return entry;
+}
+
static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
- struct qeth_buffer_pool_entry *pool_entry;
- void *ptr;
- int i, j;
+ unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ unsigned int i;
QETH_CARD_TEXT(card, 5, "alocpool");
for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
- pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
- if (!pool_entry) {
+ struct qeth_buffer_pool_entry *entry;
+
+ entry = qeth_alloc_pool_entry(buf_elements);
+ if (!entry) {
qeth_free_buffer_pool(card);
return -ENOMEM;
}
- for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
- ptr = (void *) __get_free_page(GFP_KERNEL);
- if (!ptr) {
- while (j > 0)
- free_page((unsigned long)
- pool_entry->elements[--j]);
- kfree(pool_entry);
- qeth_free_buffer_pool(card);
- return -ENOMEM;
- }
- pool_entry->elements[j] = ptr;
- }
- list_add(&pool_entry->init_list,
- &card->qdio.init_pool.entry_list);
+
+ list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
}
return 0;
}
-int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
+int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
+ unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
+ struct qeth_buffer_pool_entry *entry, *tmp;
+ int delta = count - pool->buf_count;
+ LIST_HEAD(entries);
+
QETH_CARD_TEXT(card, 2, "realcbp");
- /* TODO: steel/add buffers from/to a running card's buffer pool (?) */
- qeth_clear_working_pool_list(card);
- qeth_free_buffer_pool(card);
- card->qdio.in_buf_pool.buf_count = bufcnt;
- card->qdio.init_pool.buf_count = bufcnt;
- return qeth_alloc_buffer_pool(card);
+ /* Defer until queue is allocated: */
+ if (!card->qdio.in_q)
+ goto out;
+
+ /* Remove entries from the pool: */
+ while (delta < 0) {
+ entry = list_first_entry(&pool->entry_list,
+ struct qeth_buffer_pool_entry,
+ init_list);
+ list_del(&entry->init_list);
+ qeth_free_pool_entry(entry);
+
+ delta++;
+ }
+
+ /* Allocate additional entries: */
+ while (delta > 0) {
+ entry = qeth_alloc_pool_entry(buf_elements);
+ if (!entry) {
+ list_for_each_entry_safe(entry, tmp, &entries,
+ init_list) {
+ list_del(&entry->init_list);
+ qeth_free_pool_entry(entry);
+ }
+
+ return -ENOMEM;
+ }
+
+ list_add(&entry->init_list, &entries);
+
+ delta--;
+ }
+
+ list_splice(&entries, &pool->entry_list);
+
+out:
+ card->qdio.in_buf_pool.buf_count = count;
+ pool->buf_count = count;
+ return 0;
}
-EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
+EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
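
qeth_resize_buffer_pool() grows the pool through a private staging list so that an allocation failure midway unwinds completely without disturbing the live pool; only a fully successful batch is spliced in. The same idiom in miniature, where struct item, alloc_item() and free_staged() are hypothetical:

LIST_HEAD(staging);

while (need--) {
	struct item *it = alloc_item();		/* hypothetical allocator */

	if (!it) {
		free_staged(&staging);		/* unwind; pool untouched */
		return -ENOMEM;
	}
	list_add(&it->node, &staging);
}
list_splice(&staging, &pool->entries);		/* commit the whole batch */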
static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
@@ -1171,19 +1242,6 @@ void qeth_drain_output_queues(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
-static void qeth_free_buffer_pool(struct qeth_card *card)
-{
- struct qeth_buffer_pool_entry *pool_entry, *tmp;
- int i = 0;
- list_for_each_entry_safe(pool_entry, tmp,
- &card->qdio.init_pool.entry_list, init_list){
- for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
- free_page((unsigned long)pool_entry->elements[i]);
- list_del(&pool_entry->init_list);
- kfree(pool_entry);
- }
-}
-
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
unsigned int count = single ? 1 : card->dev->num_tx_queues;
@@ -1205,7 +1263,6 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
if (count == 1)
dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
- card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
card->qdio.no_out_queues = count;
return 0;
}
@@ -2392,7 +2449,6 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
return;
qeth_free_cq(card);
- cancel_delayed_work_sync(&card->buffer_reclaim_work);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (card->qdio.in_q->bufs[j].rx_skb)
dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
@@ -2574,7 +2630,6 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
struct list_head *plh;
struct qeth_buffer_pool_entry *entry;
int i, free;
- struct page *page;
if (list_empty(&card->qdio.in_buf_pool.entry_list))
return NULL;
@@ -2583,7 +2638,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
free = 1;
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
- if (page_count(virt_to_page(entry->elements[i])) > 1) {
+ if (page_count(entry->elements[i]) > 1) {
free = 0;
break;
}
@@ -2598,15 +2653,15 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
struct qeth_buffer_pool_entry, list);
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
- if (page_count(virt_to_page(entry->elements[i])) > 1) {
- page = alloc_page(GFP_ATOMIC);
- if (!page) {
+ if (page_count(entry->elements[i]) > 1) {
+ struct page *page = alloc_page(GFP_ATOMIC);
+
+ if (!page)
return NULL;
- } else {
- free_page((unsigned long)entry->elements[i]);
- entry->elements[i] = page_address(page);
- QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
- }
+
+ __free_page(entry->elements[i]);
+ entry->elements[i] = page;
+ QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
}
}
list_del_init(&entry->list);
@@ -2624,12 +2679,12 @@ static int qeth_init_input_buffer(struct qeth_card *card,
ETH_HLEN +
sizeof(struct ipv6hdr));
if (!buf->rx_skb)
- return 1;
+ return -ENOMEM;
}
pool_entry = qeth_find_free_buffer_pool_entry(card);
if (!pool_entry)
- return 1;
+ return -ENOBUFS;
/*
* since the buffer is accessed only from the input_tasklet
@@ -2642,7 +2697,7 @@ static int qeth_init_input_buffer(struct qeth_card *card,
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
buf->buffer->element[i].length = PAGE_SIZE;
buf->buffer->element[i].addr =
- virt_to_phys(pool_entry->elements[i]);
+ page_to_phys(pool_entry->elements[i]);
if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
else
@@ -2674,10 +2729,15 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
/* inbound queue */
qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
memset(&card->rx, 0, sizeof(struct qeth_rx));
+
qeth_initialize_working_pool_list(card);
/*give only as many buffers to hardware as we have buffer pool entries*/
- for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
- qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
+ for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
+ rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
+ if (rc)
+ return rc;
+ }
+
card->qdio.in_q->next_buf_to_init =
card->qdio.in_buf_pool.buf_count - 1;
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
@@ -6582,9 +6642,6 @@ int qeth_open(struct net_device *dev)
QETH_CARD_TEXT(card, 4, "qethopen");
- if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
- return -EIO;
-
card->data.state = CH_STATE_UP;
netif_tx_start_all_queues(dev);
@@ -6634,6 +6691,8 @@ int qeth_stop(struct net_device *dev)
}
napi_disable(&card->napi);
+ qdio_stop_irq(CARD_DDEV(card), 0);
+
return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 2bd9993aa60b..78cae61bc924 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -247,8 +247,8 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ unsigned int cnt;
char *tmp;
- int cnt, old_cnt;
int rc = 0;
mutex_lock(&card->conf_mutex);
@@ -257,13 +257,12 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
goto out;
}
- old_cnt = card->qdio.in_buf_pool.buf_count;
cnt = simple_strtoul(buf, &tmp, 10);
cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
- if (old_cnt != cnt) {
- rc = qeth_realloc_buffer_pool(card, cnt);
- }
+
+ rc = qeth_resize_buffer_pool(card, cnt);
+
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
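
The surviving range check is the nested-ternary clamp in the context above; the same bound can be written with the kernel's clamp_t() helper, semantics unchanged:

	cnt = clamp_t(unsigned int, cnt,
		      QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX);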
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 9972d96820f3..4c8e93132e08 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -284,6 +284,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_clear_ipacmd_list(card);
qeth_drain_output_queues(card);
+ cancel_delayed_work_sync(&card->buffer_reclaim_work);
card->state = CARD_STATE_DOWN;
}
@@ -1567,23 +1568,11 @@ static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
return rc;
}
-/* generic VNICC request call back control */
-struct _qeth_l2_vnicc_request_cbctl {
- struct {
- union{
- u32 *sup_cmds;
- u32 *timeout;
- };
- } result;
-};
-
/* generic VNICC request call back */
static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
struct qeth_reply *reply,
unsigned long data)
{
- struct _qeth_l2_vnicc_request_cbctl *cbctl =
- (struct _qeth_l2_vnicc_request_cbctl *) reply->param;
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
u32 sub_cmd = cmd->data.vnicc.hdr.sub_command;
@@ -1596,9 +1585,9 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
if (sub_cmd == IPA_VNICC_QUERY_CMDS)
- *cbctl->result.sup_cmds = rep->data.query_cmds.sup_cmds;
+ *(u32 *)reply->param = rep->data.query_cmds.sup_cmds;
else if (sub_cmd == IPA_VNICC_GET_TIMEOUT)
- *cbctl->result.timeout = rep->data.getset_timeout.timeout;
+ *(u32 *)reply->param = rep->data.getset_timeout.timeout;
return 0;
}
@@ -1639,7 +1628,6 @@ static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
u32 *sup_cmds)
{
- struct _qeth_l2_vnicc_request_cbctl cbctl;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccqcm");
@@ -1650,10 +1638,7 @@ static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
__ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
- /* prepare callback control */
- cbctl.result.sup_cmds = sup_cmds;
-
- return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds);
}
/* VNICC enable/disable characteristic request */
@@ -1677,7 +1662,6 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
u32 cmd, u32 *timeout)
{
struct qeth_vnicc_getset_timeout *getset_timeout;
- struct _qeth_l2_vnicc_request_cbctl cbctl;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccgst");
@@ -1692,11 +1676,7 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
if (cmd == IPA_VNICC_SET_TIMEOUT)
getset_timeout->timeout = *timeout;
- /* prepare callback control */
- if (cmd == IPA_VNICC_GET_TIMEOUT)
- cbctl.result.timeout = timeout;
-
- return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
}
/* set current VNICC flag state; called from sysfs store function */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 1000e18c1090..8a803d6c9357 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1172,6 +1172,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
qeth_l3_clear_ip_htable(card, 1);
qeth_clear_ipacmd_list(card);
qeth_drain_output_queues(card);
+ cancel_delayed_work_sync(&card->buffer_reclaim_work);
card->state = CARD_STATE_DOWN;
}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 29f2517d2a31..a3d1c3bdfadb 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -206,12 +206,11 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) {
card->options.sniffer = i;
- if (card->qdio.init_pool.buf_count !=
- QETH_IN_BUF_COUNT_MAX)
- qeth_realloc_buffer_pool(card,
- QETH_IN_BUF_COUNT_MAX);
- } else
+ qeth_resize_buffer_pool(card, QETH_IN_BUF_COUNT_MAX);
+ } else {
rc = -EPERM;
+ }
+
break;
default:
rc = -EINVAL;
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 2b1e4da1944f..4bfb79f20588 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -410,7 +410,7 @@ struct fsf_qtcb_bottom_port {
u8 cb_util;
u8 a_util;
u8 res2;
- u16 temperature;
+ s16 temperature;
u16 vcc;
u16 tx_bias;
u16 tx_power;
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 494b9fe9cc94..a711a0d15100 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -800,7 +800,7 @@ static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \
zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
-ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 6, "%hd");
ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
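
The two hunks belong together: once the SFP temperature field is declared s16, printing it with an unsigned format would corrupt readings below zero, and the width bumps from 5 to 6 to leave room for the sign ("-32768"). A small illustration of the failure mode:

	s16 temperature = -5;		/* raw diagnostic word from the SFP */

	pr_info("%hu\n", temperature);	/* prints 65531, wrong */
	pr_info("%hd\n", temperature);	/* prints -5, correct */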
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 9c5f7c9178c6..2b865c6423e2 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -628,6 +628,8 @@ redisc:
}
out:
kref_put(&rdata->kref, fc_rport_destroy);
+ if (!IS_ERR(fp))
+ fc_frame_free(fp);
}
/**
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 604856e72cfb..5b19f5175c5c 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1577,8 +1577,7 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
struct fc_lport *lport = qedf->lport;
struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
- u8 buf[8];
- int i, pos;
+ u64 dsn;
/*
* fdmi_enabled needs to be set for libfc to execute FDMI registration.
@@ -1591,18 +1590,11 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
*/
/* Get the PCI-e Device Serial Number Capability */
- pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
- if (pos) {
- pos += 4;
- for (i = 0; i < 8; i++)
- pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
-
+ dsn = pci_get_dsn(qedf->pdev);
+ if (dsn)
snprintf(fc_host->serial_number,
- sizeof(fc_host->serial_number),
- "%02X%02X%02X%02X%02X%02X%02X%02X",
- buf[7], buf[6], buf[5], buf[4],
- buf[3], buf[2], buf[1], buf[0]);
- } else
+ sizeof(fc_host->serial_number), "%016llX", dsn);
+ else
snprintf(fc_host->serial_number,
sizeof(fc_host->serial_number), "Unknown");
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index e4282bce5834..f45c22b09726 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -161,6 +161,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct scsi_disk *sdkp = scsi_disk(disk);
+ sector_t capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
unsigned int nr, i;
unsigned char *buf;
size_t offset, buflen = 0;
@@ -171,11 +172,15 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
/* Not a zoned device */
return -EOPNOTSUPP;
+ if (!capacity)
+ /* Device gone or invalid */
+ return -ENODEV;
+
buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
if (!buf)
return -ENOMEM;
- while (zone_idx < nr_zones && sector < get_capacity(disk)) {
+ while (zone_idx < nr_zones && sector < capacity) {
ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
sectors_to_logical(sdkp->device, sector), true);
if (ret)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0fbb8fe6e521..e4240e4ae8bb 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -688,7 +688,7 @@ static const struct block_device_operations sr_bdops =
.release = sr_block_release,
.ioctl = sr_block_ioctl,
#ifdef CONFIG_COMPAT
- .ioctl = sr_block_compat_ioctl,
+ .compat_ioctl = sr_block_compat_ioctl,
#endif
.check_events = sr_block_check_events,
.revalidate_disk = sr_block_revalidate_disk,
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
index fb70b8a3f7c5..20d37eaeb5f2 100644
--- a/drivers/soc/imx/soc-imx-scu.c
+++ b/drivers/soc/imx/soc-imx-scu.c
@@ -25,7 +25,7 @@ struct imx_sc_msg_misc_get_soc_id {
u32 id;
} resp;
} data;
-} __packed;
+} __packed __aligned(4);
struct imx_sc_msg_misc_get_soc_uid {
struct imx_sc_rpc_msg hdr;
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index fd8007ebb145..13def7f78b9e 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -149,6 +149,7 @@ struct atmel_qspi {
struct clk *qspick;
struct platform_device *pdev;
const struct atmel_qspi_caps *caps;
+ resource_size_t mmap_size;
u32 pending;
u32 mr;
u32 scr;
@@ -329,6 +330,14 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
u32 sr, offset;
int err;
+ /*
+ * Check if the address exceeds the MMIO window size. An improvement
+ * would be to add support for regular SPI mode and fall back to it
+ * when the flash memories overrun the controller's memory space.
+ */
+ if (op->addr.val + op->data.nbytes > aq->mmap_size)
+ return -ENOTSUPP;
+
err = atmel_qspi_set_cfg(aq, op, &offset);
if (err)
return err;
@@ -480,6 +489,8 @@ static int atmel_qspi_probe(struct platform_device *pdev)
goto exit;
}
+ aq->mmap_size = resource_size(res);
+
/* Get the peripheral clock */
aq->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(aq->pclk))
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 7327309ea3d5..6c235306c0e4 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -366,7 +366,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
goto out_disable_clk;
rate = clk_get_rate(pll_clk);
- clk_disable_unprepare(pll_clk);
if (!rate) {
ret = -EINVAL;
goto out_disable_pll_clk;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7e2292c11d12..e9e256718ef4 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -130,6 +130,7 @@ struct omap2_mcspi {
int fifo_depth;
bool slave_aborted;
unsigned int pin_dir:1;
+ size_t max_xfer_len;
};
struct omap2_mcspi_cs {
@@ -974,20 +975,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
* Note that we currently allow DMA only if we get a channel
* for both rx and tx. Otherwise we'll do PIO for both rx and tx.
*/
-static int omap2_mcspi_request_dma(struct spi_device *spi)
+static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
+ struct omap2_mcspi_dma *mcspi_dma)
{
- struct spi_master *master = spi->master;
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
int ret = 0;
- mcspi = spi_master_get_devdata(master);
- mcspi_dma = mcspi->dma_channels + spi->chip_select;
-
- init_completion(&mcspi_dma->dma_rx_completion);
- init_completion(&mcspi_dma->dma_tx_completion);
-
- mcspi_dma->dma_rx = dma_request_chan(&master->dev,
+ mcspi_dma->dma_rx = dma_request_chan(mcspi->dev,
mcspi_dma->dma_rx_ch_name);
if (IS_ERR(mcspi_dma->dma_rx)) {
ret = PTR_ERR(mcspi_dma->dma_rx);
@@ -995,7 +988,7 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
goto no_dma;
}
- mcspi_dma->dma_tx = dma_request_chan(&master->dev,
+ mcspi_dma->dma_tx = dma_request_chan(mcspi->dev,
mcspi_dma->dma_tx_ch_name);
if (IS_ERR(mcspi_dma->dma_tx)) {
ret = PTR_ERR(mcspi_dma->dma_tx);
@@ -1004,20 +997,40 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
mcspi_dma->dma_rx = NULL;
}
+ init_completion(&mcspi_dma->dma_rx_completion);
+ init_completion(&mcspi_dma->dma_tx_completion);
+
no_dma:
return ret;
}
+static void omap2_mcspi_release_dma(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_dma *mcspi_dma;
+ int i;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ mcspi_dma = &mcspi->dma_channels[i];
+
+ if (mcspi_dma->dma_rx) {
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
+ }
+ if (mcspi_dma->dma_tx) {
+ dma_release_channel(mcspi_dma->dma_tx);
+ mcspi_dma->dma_tx = NULL;
+ }
+ }
+}
+
static int omap2_mcspi_setup(struct spi_device *spi)
{
int ret;
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
- struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs = spi->controller_state;
- mcspi_dma = &mcspi->dma_channels[spi->chip_select];
-
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
@@ -1042,13 +1055,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
}
}
- if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
- ret = omap2_mcspi_request_dma(spi);
- if (ret)
- dev_warn(&spi->dev, "not using DMA for McSPI (%d)\n",
- ret);
- }
-
ret = pm_runtime_get_sync(mcspi->dev);
if (ret < 0) {
pm_runtime_put_noidle(mcspi->dev);
@@ -1065,12 +1071,8 @@ static int omap2_mcspi_setup(struct spi_device *spi)
static void omap2_mcspi_cleanup(struct spi_device *spi)
{
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs;
- mcspi = spi_master_get_devdata(spi->master);
-
if (spi->controller_state) {
/* Unlink controller state from context save list */
cs = spi->controller_state;
@@ -1079,19 +1081,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
kfree(cs);
}
- if (spi->chip_select < spi->master->num_chipselect) {
- mcspi_dma = &mcspi->dma_channels[spi->chip_select];
-
- if (mcspi_dma->dma_rx) {
- dma_release_channel(mcspi_dma->dma_rx);
- mcspi_dma->dma_rx = NULL;
- }
- if (mcspi_dma->dma_tx) {
- dma_release_channel(mcspi_dma->dma_tx);
- mcspi_dma->dma_tx = NULL;
- }
- }
-
if (gpio_is_valid(spi->cs_gpio))
gpio_free(spi->cs_gpio);
}
@@ -1302,9 +1291,24 @@ static bool omap2_mcspi_can_dma(struct spi_master *master,
if (spi_controller_is_slave(master))
return true;
+ master->dma_rx = mcspi_dma->dma_rx;
+ master->dma_tx = mcspi_dma->dma_tx;
+
return (xfer->len >= DMA_MIN_BYTES);
}
+static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma =
+ &mcspi->dma_channels[spi->chip_select];
+
+ if (mcspi->max_xfer_len && mcspi_dma->dma_rx)
+ return mcspi->max_xfer_len;
+
+ return SIZE_MAX;
+}
+
static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
@@ -1373,6 +1377,11 @@ static struct omap2_mcspi_platform_config omap4_pdata = {
.regs_offset = OMAP4_MCSPI_REG_OFFSET,
};
+static struct omap2_mcspi_platform_config am654_pdata = {
+ .regs_offset = OMAP4_MCSPI_REG_OFFSET,
+ .max_xfer_len = SZ_4K - 1,
+};
+
static const struct of_device_id omap_mcspi_of_match[] = {
{
.compatible = "ti,omap2-mcspi",
@@ -1382,6 +1391,10 @@ static const struct of_device_id omap_mcspi_of_match[] = {
.compatible = "ti,omap4-mcspi",
.data = &omap4_pdata,
},
+ {
+ .compatible = "ti,am654-mcspi",
+ .data = &am654_pdata,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
@@ -1439,6 +1452,10 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
mcspi->pin_dir = pdata->pin_dir;
}
regs_offset = pdata->regs_offset;
+ if (pdata->max_xfer_len) {
+ mcspi->max_xfer_len = pdata->max_xfer_len;
+ master->max_transfer_size = omap2_mcspi_max_xfer_size;
+ }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mcspi->base = devm_ioremap_resource(&pdev->dev, r);
@@ -1464,6 +1481,11 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
for (i = 0; i < master->num_chipselect; i++) {
sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
+
+ status = omap2_mcspi_request_dma(mcspi,
+ &mcspi->dma_channels[i]);
+ if (status == -EPROBE_DEFER)
+ goto free_master;
}
status = platform_get_irq(pdev, 0);
@@ -1501,6 +1523,7 @@ disable_pm:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
free_master:
+ omap2_mcspi_release_dma(master);
spi_master_put(master);
return status;
}
@@ -1510,6 +1533,8 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ omap2_mcspi_release_dma(master);
+
pm_runtime_dont_use_autosuspend(mcspi->dev);
pm_runtime_put_sync(mcspi->dev);
pm_runtime_disable(&pdev->dev);
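
The max_xfer_len plumbing becomes visible to SPI clients through the core's spi_max_transfer_size() accessor, which consults the controller hook installed in probe. A sketch of the client side; the buf/len chunking loop is illustrative:

	size_t max = spi_max_transfer_size(spi);  /* SZ_4K - 1 on AM654 with DMA */

	while (len) {
		size_t chunk = min(len, max);

		/* queue one transfer of 'chunk' bytes here, then advance */
		buf += chunk;
		len -= chunk;
	}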
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 4c7a71f0fb3e..2e318158fca9 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -70,6 +70,10 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define LPSS_CAPS_CS_EN_SHIFT 9
#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
+#define LPSS_PRIV_CLOCK_GATE 0x38
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
+
struct lpss_config {
/* LPSS offset from drv_data->ioaddr */
unsigned offset;
@@ -86,6 +90,8 @@ struct lpss_config {
unsigned cs_sel_shift;
unsigned cs_sel_mask;
unsigned cs_num;
+ /* Quirks */
+ unsigned cs_clk_stays_gated : 1;
};
/* Keep these sorted with enum pxa_ssp_type */
@@ -156,6 +162,7 @@ static const struct lpss_config lpss_platforms[] = {
.tx_threshold_hi = 56,
.cs_sel_shift = 8,
.cs_sel_mask = 3 << 8,
+ .cs_clk_stays_gated = true,
},
};
@@ -383,6 +390,22 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
else
value |= LPSS_CS_CONTROL_CS_HIGH;
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
+ if (config->cs_clk_stays_gated) {
+ u32 clkgate;
+
+ /*
+ * Changing CS alone when dynamic clock gating is on won't
+ * actually flip CS at that time. This ruins SPI transfers
+ * that specify delays, or have no data. Briefly force the clock
+ * on so the CS change actually takes effect.
+ */
+ clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE);
+ value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) |
+ LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON;
+
+ __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value);
+ __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate);
+ }
}
static void cs_assert(struct spi_device *spi)
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index dd3434a407ea..a364b99497e2 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1217,6 +1217,11 @@ static int spi_qup_suspend(struct device *device)
struct spi_qup *controller = spi_master_get_devdata(master);
int ret;
+ if (pm_runtime_suspended(device)) {
+ ret = spi_qup_pm_resume_runtime(device);
+ if (ret)
+ return ret;
+ }
ret = spi_master_suspend(master);
if (ret)
return ret;
@@ -1225,10 +1230,8 @@ static int spi_qup_suspend(struct device *device)
if (ret)
return ret;
- if (!pm_runtime_suspended(device)) {
- clk_disable_unprepare(controller->cclk);
- clk_disable_unprepare(controller->iclk);
- }
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
return 0;
}
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 60c4de4e4485..7412a3042a8d 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -401,9 +401,6 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
- /* Dummy generic FIFO entry */
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
-
/* Manually start the generic FIFO command */
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 38b4c78df506..755221bc3745 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2639,7 +2639,7 @@ int spi_register_controller(struct spi_controller *ctlr)
if (ctlr->use_gpio_descriptors) {
status = spi_get_gpio_descs(ctlr);
if (status)
- return status;
+ goto free_bus_id;
/*
* A controller using GPIO descriptors always
* supports SPI_CS_HIGH if need be.
@@ -2649,7 +2649,7 @@ int spi_register_controller(struct spi_controller *ctlr)
/* Legacy code path for GPIOs from DT */
status = of_spi_get_gpio_numbers(ctlr);
if (status)
- return status;
+ goto free_bus_id;
}
}
@@ -2657,17 +2657,14 @@ int spi_register_controller(struct spi_controller *ctlr)
* Even if it's just one always-selected device, there must
* be at least one chipselect.
*/
- if (!ctlr->num_chipselect)
- return -EINVAL;
+ if (!ctlr->num_chipselect) {
+ status = -EINVAL;
+ goto free_bus_id;
+ }
status = device_add(&ctlr->dev);
- if (status < 0) {
- /* free bus id */
- mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
- mutex_unlock(&board_lock);
- goto done;
- }
+ if (status < 0)
+ goto free_bus_id;
dev_dbg(dev, "registered %s %s\n",
spi_controller_is_slave(ctlr) ? "slave" : "master",
dev_name(&ctlr->dev));
@@ -2683,11 +2680,7 @@ int spi_register_controller(struct spi_controller *ctlr)
status = spi_controller_initialize_queue(ctlr);
if (status) {
device_del(&ctlr->dev);
- /* free bus id */
- mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
- mutex_unlock(&board_lock);
- goto done;
+ goto free_bus_id;
}
}
/* add statistics */
@@ -2702,7 +2695,12 @@ int spi_register_controller(struct spi_controller *ctlr)
/* Register devices from the device tree and ACPI */
of_register_spi_devices(ctlr);
acpi_register_spi_devices(ctlr);
-done:
+ return status;
+
+free_bus_id:
+ mutex_lock(&board_lock);
+ idr_remove(&spi_master_idr, ctlr->bus_num);
+ mutex_unlock(&board_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
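Editor's note: the hunks above replace several open-coded idr_remove() cleanups
with a single free_bus_id label, the usual kernel idiom of one exit path per
acquired resource. A self-contained sketch of that goto-unwind shape (resource
names and the failing setup step are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

static int do_setup(void) { return -1; } /* simulate a mid-probe failure */

static int register_thing(char **out)
{
	int status;
	char *bus_id = malloc(16); /* first resource acquired */

	if (!bus_id)
		return -1;

	status = do_setup();
	if (status < 0)
		goto free_bus_id; /* every later failure funnels through one label */

	*out = bus_id; /* success: ownership passes to the caller */
	return 0;

free_bus_id:
	free(bus_id);
	return status;
}

int main(void)
{
	char *thing;

	printf("register_thing: %d\n", register_thing(&thing));
	return 0;
}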
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 1e217e3e9486..2ab6e782f14c 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -396,6 +396,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
else
retval = get_user(tmp, (u32 __user *)arg);
if (retval == 0) {
+ struct spi_controller *ctlr = spi->controller;
u32 save = spi->mode;
if (tmp & ~SPI_MODE_MASK) {
@@ -403,6 +404,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
+ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
+ ctlr->cs_gpiods[spi->chip_select])
+ tmp |= SPI_CS_HIGH;
+
tmp |= spi->mode & ~SPI_MODE_MASK;
spi->mode = (u16)tmp;
retval = spi_setup(spi);
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 97c615a2f057..c98835326135 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -558,13 +558,13 @@ static int hantro_attach_func(struct hantro_dev *vpu,
goto err_rel_entity1;
/* Connect the three entities */
- ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 1,
+ ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED);
if (ret)
goto err_rel_entity2;
- ret = media_create_pad_link(&func->proc, 0, &func->sink, 0,
+ ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED);
if (ret)
diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
index 790997aff995..050c0da23c6f 100644
--- a/drivers/staging/qlge/qlge_ethtool.c
+++ b/drivers/staging/qlge/qlge_ethtool.c
@@ -714,6 +714,8 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value)
}
const struct ethtool_ops qlge_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = ql_get_drvinfo,
.get_wol = ql_get_wol,
.set_wol = ql_set_wol,
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index a8b4d0c5ab7e..032f3264fba1 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -51,9 +51,7 @@ static void __speakup_set_selection(struct work_struct *work)
goto unref;
}
- console_lock();
set_selection_kernel(&sel, tty);
- console_unlock();
unref:
tty_kref_put(tty);
diff --git a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
index 26de6762b942..081d58abd5ac 100644
--- a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
+++ b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
@@ -93,5 +93,5 @@ Some properties are recognized by both the SPI and SDIO versions:
Must contain 64 hexadecimal digits. Not supported in the current version.
WFx driver also supports `mac-address` and `local-mac-address` as described in
-Documentation/devicetree/binding/net/ethernet.txt
+Documentation/devicetree/bindings/net/ethernet.txt
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 6370bb55f512..0026eb6f13ce 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -212,6 +212,19 @@ unlock:
return rc;
}
+static void destroy_session(struct kref *ref)
+{
+ struct amdtee_session *sess = container_of(ref, struct amdtee_session,
+ refcount);
+
+ /* Unload the TA from TEE */
+ handle_unload_ta(sess->ta_handle);
+ mutex_lock(&session_list_mutex);
+ list_del(&sess->list_node);
+ mutex_unlock(&session_list_mutex);
+ kfree(sess);
+}
+
int amdtee_open_session(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param)
@@ -236,15 +249,13 @@ int amdtee_open_session(struct tee_context *ctx,
/* Load the TA binary into TEE environment */
handle_load_ta(ta, ta_size, arg);
- if (arg->ret == TEEC_SUCCESS) {
- mutex_lock(&session_list_mutex);
- sess = alloc_session(ctxdata, arg->session);
- mutex_unlock(&session_list_mutex);
- }
-
if (arg->ret != TEEC_SUCCESS)
goto out;
+ mutex_lock(&session_list_mutex);
+ sess = alloc_session(ctxdata, arg->session);
+ mutex_unlock(&session_list_mutex);
+
if (!sess) {
rc = -ENOMEM;
goto out;
@@ -259,40 +270,29 @@ int amdtee_open_session(struct tee_context *ctx,
if (i >= TEE_NUM_SESSIONS) {
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ kref_put(&sess->refcount, destroy_session);
rc = -ENOMEM;
goto out;
}
/* Open session with loaded TA */
handle_open_session(arg, &session_info, param);
-
- if (arg->ret == TEEC_SUCCESS) {
- sess->session_info[i] = session_info;
- set_session_id(sess->ta_handle, i, &arg->session);
- } else {
+ if (arg->ret != TEEC_SUCCESS) {
pr_err("open_session failed %d\n", arg->ret);
spin_lock(&sess->lock);
clear_bit(i, sess->sess_mask);
spin_unlock(&sess->lock);
+ kref_put(&sess->refcount, destroy_session);
+ goto out;
}
+
+ sess->session_info[i] = session_info;
+ set_session_id(sess->ta_handle, i, &arg->session);
out:
free_pages((u64)ta, get_order(ta_size));
return rc;
}
-static void destroy_session(struct kref *ref)
-{
- struct amdtee_session *sess = container_of(ref, struct amdtee_session,
- refcount);
-
- /* Unload the TA from TEE */
- handle_unload_ta(sess->ta_handle);
- mutex_lock(&session_list_mutex);
- list_del(&sess->list_node);
- mutex_unlock(&session_list_mutex);
- kfree(sess);
-}
-
int amdtee_close_session(struct tee_context *ctx, u32 session)
{
struct amdtee_context_data *ctxdata = ctx->data;
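Editor's note: besides moving destroy_session() above its first use, the hunk
fixes the refcounting so every failure after alloc_session() drops the session
with kref_put(), whose release callback recovers the outer object via
container_of(). A rough userspace equivalent of that kref/container_of pattern
(types and values invented for the sketch):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { atomic_int count; };

static void kref_init(struct kref *k) { atomic_init(&k->count, 1); }
static void kref_get(struct kref *k) { atomic_fetch_add(&k->count, 1); }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (atomic_fetch_sub(&k->count, 1) == 1)
		release(k); /* last reference gone */
}

struct session {
	int ta_handle;
	struct kref refcount;
};

static void destroy_session(struct kref *ref)
{
	struct session *sess = container_of(ref, struct session, refcount);

	printf("unloading TA %d and freeing session\n", sess->ta_handle);
	free(sess);
}

int main(void)
{
	struct session *sess = malloc(sizeof(*sess));

	if (!sess)
		return 1;
	sess->ta_handle = 42;
	kref_init(&sess->refcount);
	kref_get(&sess->refcount);                  /* a second user appears */
	kref_put(&sess->refcount, destroy_session); /* not yet freed */
	kref_put(&sess->refcount, destroy_session); /* now destroyed */
	return 0;
}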
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 42345e79920c..c5f0d936b003 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/slab.h>
+#include <linux/platform_data/x86/apple.h>
static bool is_registered;
static DEFINE_IDA(ctrl_ida);
@@ -631,6 +632,15 @@ static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
if (ret)
return ret;
+ /*
+ * Apple machines provide an empty resource template, so on those
+ * machines just look for immediate children with a "baud" property
+ * (from the _DSM method) instead.
+ */
+ if (!lookup.controller_handle && x86_apple_machine &&
+ !acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, NULL))
+ acpi_get_parent(adev->handle, &lookup.controller_handle);
+
/* Make sure controller and ResourceSource handle match */
if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle)
return -ENODEV;
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 91e9b070d36d..d330da76d6b6 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -25,6 +25,14 @@
#include "8250.h"
+#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052
+#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d
+#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c
+#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8
+#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2
+#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db
+#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea
+
#define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
#define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
#define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a
@@ -677,6 +685,22 @@ static int __maybe_unused exar_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume);
+static const struct exar8250_board acces_com_2x = {
+ .num_ports = 2,
+ .setup = pci_xr17c154_setup,
+};
+
+static const struct exar8250_board acces_com_4x = {
+ .num_ports = 4,
+ .setup = pci_xr17c154_setup,
+};
+
+static const struct exar8250_board acces_com_8x = {
+ .num_ports = 8,
+ .setup = pci_xr17c154_setup,
+};
+
static const struct exar8250_board pbn_fastcom335_2 = {
.num_ports = 2,
.setup = pci_fastcom335_setup,
@@ -745,6 +769,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
}
static const struct pci_device_id exar_pci_tbl[] = {
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x),
+
CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect),
CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect),
CONNECT_DEVICE(XR17C158, UART_8_232, pbn_connect),
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 91e2805e6441..c31b8f3db6bf 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -264,6 +264,7 @@ struct lpuart_port {
int rx_dma_rng_buf_len;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
+ bool id_allocated;
};
struct lpuart_soc_data {
@@ -2390,6 +2391,8 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic
OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
+EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
+EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
#define LPUART_CONSOLE (&lpuart_console)
#define LPUART32_CONSOLE (&lpuart32_console)
@@ -2420,19 +2423,6 @@ static int lpuart_probe(struct platform_device *pdev)
if (!sport)
return -ENOMEM;
- ret = of_alias_get_id(np, "serial");
- if (ret < 0) {
- ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
- if (ret < 0) {
- dev_err(&pdev->dev, "port line is full, add device failed\n");
- return ret;
- }
- }
- if (ret >= ARRAY_SIZE(lpuart_ports)) {
- dev_err(&pdev->dev, "serial%d out of range\n", ret);
- return -EINVAL;
- }
- sport->port.line = ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(sport->port.membase))
@@ -2477,9 +2467,25 @@ static int lpuart_probe(struct platform_device *pdev)
}
}
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+ ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "port line is full, add device failed\n");
+ return ret;
+ }
+ sport->id_allocated = true;
+ }
+ if (ret >= ARRAY_SIZE(lpuart_ports)) {
+ dev_err(&pdev->dev, "serial%d out of range\n", ret);
+ ret = -EINVAL;
+ goto failed_out_of_range;
+ }
+ sport->port.line = ret;
+
ret = lpuart_enable_clks(sport);
if (ret)
- return ret;
+ goto failed_clock_enable;
sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
lpuart_ports[sport->port.line] = sport;
@@ -2529,6 +2535,10 @@ static int lpuart_probe(struct platform_device *pdev)
failed_attach_port:
failed_irq_request:
lpuart_disable_clks(sport);
+failed_clock_enable:
+failed_out_of_range:
+ if (sport->id_allocated)
+ ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
return ret;
}
@@ -2538,7 +2548,8 @@ static int lpuart_remove(struct platform_device *pdev)
uart_remove_one_port(&lpuart_reg, &sport->port);
- ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
+ if (sport->id_allocated)
+ ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
lpuart_disable_clks(sport);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index c12a12556339..4e9a590712cb 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -851,7 +851,7 @@ static int mvebu_uart_probe(struct platform_device *pdev)
port->membase = devm_ioremap_resource(&pdev->dev, reg);
if (IS_ERR(port->membase))
- return -PTR_ERR(port->membase);
+ return PTR_ERR(port->membase);
mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
GFP_KERNEL);
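Editor's note: the one-character fix above matters because PTR_ERR() already
yields a negative errno; negating it hands callers a positive value they treat
as success. A toy reproduction with simplified stand-ins for the kernel's
ERR_PTR()/PTR_ERR() macros (the real ones live in <linux/err.h>):

#include <stdio.h>

#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define ENODEV 19

int main(void)
{
	void *membase = ERR_PTR(-ENODEV); /* e.g. a failed devm_ioremap_resource() */

	printf("PTR_ERR  -> %ld (correct: negative errno)\n", PTR_ERR(membase));
	printf("-PTR_ERR -> %ld (bug: looks like success)\n", -PTR_ERR(membase));
	return 0;
}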
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 0c50d7410b31..d7d2e4b844bc 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -181,7 +181,7 @@ int set_selection_user(const struct tiocl_selection __user *sel,
return set_selection_kernel(&v, tty);
}
-int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
+static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
{
struct vc_data *vc = vc_cons[fg_console].d;
int new_sel_start, new_sel_end, spc;
@@ -214,7 +214,6 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
if (ps > pe) /* make sel_start <= sel_end */
swap(ps, pe);
- mutex_lock(&sel_lock);
if (sel_cons != vc_cons[fg_console].d) {
clear_selection();
sel_cons = vc_cons[fg_console].d;
@@ -260,10 +259,9 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
break;
case TIOCL_SELPOINTER:
highlight_pointer(pe);
- goto unlock;
+ return 0;
default:
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
/* remove the pointer */
@@ -285,7 +283,7 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
else if (new_sel_start == sel_start)
{
if (new_sel_end == sel_end) /* no action required */
- goto unlock;
+ return 0;
else if (new_sel_end > sel_end) /* extend to right */
highlight(sel_end + 2, new_sel_end);
else /* contract from right */
@@ -313,8 +311,7 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
if (!bp) {
printk(KERN_WARNING "selection: kmalloc() failed\n");
clear_selection();
- ret = -ENOMEM;
- goto unlock;
+ return -ENOMEM;
}
kfree(sel_buffer);
sel_buffer = bp;
@@ -339,8 +336,20 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
}
}
sel_buffer_lth = bp - sel_buffer;
-unlock:
+
+ return ret;
+}
+
+int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
+{
+ int ret;
+
+ mutex_lock(&sel_lock);
+ console_lock();
+ ret = __set_selection_kernel(v, tty);
+ console_unlock();
mutex_unlock(&sel_lock);
+
return ret;
}
EXPORT_SYMBOL_GPL(set_selection_kernel);
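Editor's note: this refactor is the standard split into a lock-free __helper plus
a thin exported wrapper that takes both locks in a fixed order, which lets the
helper return early without goto-unlock bookkeeping. A pthread sketch of the same
shape (function names and the trivial body are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sel_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;

/* does the real work; caller holds both locks, so early returns are safe */
static int __set_selection(int arg)
{
	if (arg < 0)
		return -1; /* no unlock bookkeeping needed here */
	printf("selection set to %d\n", arg);
	return 0;
}

int set_selection(int arg)
{
	int ret;

	pthread_mutex_lock(&sel_lock);     /* fixed lock order: sel_lock ... */
	pthread_mutex_lock(&console_lock); /* ... then console_lock */
	ret = __set_selection(arg);
	pthread_mutex_unlock(&console_lock);
	pthread_mutex_unlock(&sel_lock);

	return ret;
}

int main(void)
{
	return set_selection(3) ? 1 : 0;
}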
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 0cfbb7182b5a..15d27698054a 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3046,10 +3046,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
switch (type)
{
case TIOCL_SETSEL:
- console_lock();
ret = set_selection_user((struct tiocl_selection
__user *)(p+1), tty);
- console_unlock();
break;
case TIOCL_PASTESEL:
ret = paste_selection(tty);
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 736b0c6e27fe..3574dbb09366 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -2550,7 +2550,7 @@ found:
/* Update ring only if removed request is on pending_req_list list */
if (req_on_hw_ring) {
link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
- (priv_req->start_trb * TRB_SIZE));
+ ((priv_req->end_trb + 1) * TRB_SIZE));
link_trb->control = (link_trb->control & TRB_CYCLE) |
TRB_TYPE(TRB_LINK) | TRB_CHAIN;
@@ -2595,11 +2595,21 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct usb_request *request;
+ struct cdns3_request *priv_req;
+ struct cdns3_trb *trb = NULL;
int ret;
int val;
trace_cdns3_halt(priv_ep, 0, 0);
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+ if (request) {
+ priv_req = to_cdns3_request(request);
+ trb = priv_req->trb;
+ if (trb)
+ trb->control = trb->control ^ TRB_CYCLE;
+ }
+
writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
/* wait for EPRST cleared */
@@ -2610,10 +2620,11 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);
- request = cdns3_next_request(&priv_ep->pending_req_list);
-
- if (request)
+ if (request) {
+ if (trb)
+ trb->control = trb->control ^ TRB_CYCLE;
cdns3_rearm_transfer(priv_ep, 1);
+ }
cdns3_start_all_request(priv_dev, priv_ep);
return ret;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1d212f82c69b..54cd8ef795ec 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -988,13 +988,17 @@ int usb_remove_device(struct usb_device *udev)
{
struct usb_hub *hub;
struct usb_interface *intf;
+ int ret;
if (!udev->parent) /* Can't remove a root hub */
return -EINVAL;
hub = usb_hub_to_struct_hub(udev->parent);
intf = to_usb_interface(hub->intfdev);
- usb_autopm_get_interface(intf);
+ ret = usb_autopm_get_interface(intf);
+ if (ret < 0)
+ return ret;
+
set_bit(udev->portnum, hub->removed_bits);
hub_port_logical_disconnect(hub, udev->portnum);
usb_autopm_put_interface(intf);
@@ -1866,7 +1870,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
hub->quirk_disable_autosuspend = 1;
- usb_autopm_get_interface(intf);
+ usb_autopm_get_interface_no_resume(intf);
}
if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index bbbb35fa639f..235a7c645503 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -213,7 +213,10 @@ static int usb_port_runtime_resume(struct device *dev)
if (!port_dev->is_superspeed && peer)
pm_runtime_get_sync(&peer->dev);
- usb_autopm_get_interface(intf);
+ retval = usb_autopm_get_interface(intf);
+ if (retval < 0)
+ return retval;
+
retval = usb_hub_set_port_power(hdev, hub, port1, true);
msleep(hub_power_on_good_delay(hub));
if (udev && !retval) {
@@ -266,7 +269,10 @@ static int usb_port_runtime_suspend(struct device *dev)
if (usb_port_block_power_off)
return -EBUSY;
- usb_autopm_get_interface(intf);
+ retval = usb_autopm_get_interface(intf);
+ if (retval < 0)
+ return retval;
+
retval = usb_hub_set_port_power(hdev, hub, port1, false);
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
if (!port_dev->is_superspeed)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 2b24336a72e5..2dac3e7cdd97 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -231,6 +231,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Logitech PTZ Pro Camera */
{ USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Logitech Screen Share */
+ { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 1b7d2f9cb673..1e00bf2d65a2 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1071,7 +1071,14 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
unsigned int rem = length % maxp;
unsigned chain = true;
- if (sg_is_last(s))
+ /*
+ * IOMMU driver is coalescing the list of sgs which shares a
+ * page boundary into one and giving it to USB driver. With
+ * this the number of sgs mapped is not equal to the number of
+	 * sgs passed. So set the chain bit to false if it is the last
+ * mapped sg.
+ */
+ if (i == remaining - 1)
chain = false;
if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 10c9e7f6273e..29fe5771c21b 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -424,10 +424,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
return err;
}
- hub->vdd = devm_regulator_get(dev, "vdd");
- if (IS_ERR(hub->vdd))
- return PTR_ERR(hub->vdd);
-
if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
hub->vendor_id = USB251XB_DEF_VENDOR_ID;
@@ -640,6 +636,13 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
}
#endif /* CONFIG_OF */
+static void usb251xb_regulator_disable_action(void *data)
+{
+ struct usb251xb *hub = data;
+
+ regulator_disable(hub->vdd);
+}
+
static int usb251xb_probe(struct usb251xb *hub)
{
struct device *dev = hub->dev;
@@ -676,10 +679,19 @@ static int usb251xb_probe(struct usb251xb *hub)
if (err)
return err;
+ hub->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(hub->vdd))
+ return PTR_ERR(hub->vdd);
+
err = regulator_enable(hub->vdd);
if (err)
return err;
+ err = devm_add_action_or_reset(dev,
+ usb251xb_regulator_disable_action, hub);
+ if (err)
+ return err;
+
err = usb251xb_connect(hub);
if (err) {
dev_err(dev, "Failed to connect hub (%d)\n", err);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1cd9b6305b06..1880f3e13f57 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1258,6 +1258,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
USB_SC_RBC, USB_PR_BULK, NULL,
0 ),
+UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
+ "Samsung",
+ "Flash Drive FIT",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64),
+
/* aeb */
UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
"Feiya",
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 403707a3e503..0093bbd0d326 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -456,6 +456,13 @@ config BACKLIGHT_RAVE_SP
help
Support for backlight control on RAVE SP device.
+config BACKLIGHT_LED
+ tristate "Generic LED based Backlight Driver"
+ depends on LEDS_CLASS && OF
+ help
+	  If you have an LCD backlight adjustable by an LED class driver, say Y
+	  to enable this driver.
+
endif # BACKLIGHT_CLASS_DEVICE
endmenu
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 6f8777037c37..0c1a1524627a 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -57,3 +57,4 @@ obj-$(CONFIG_BACKLIGHT_TPS65217) += tps65217_bl.o
obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
obj-$(CONFIG_BACKLIGHT_ARCXCNN) += arcxcnn_bl.o
obj-$(CONFIG_BACKLIGHT_RAVE_SP) += rave-sp-backlight.o
+obj-$(CONFIG_BACKLIGHT_LED) += led_bl.o
diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c
new file mode 100644
index 000000000000..3f66549997c8
--- /dev/null
+++ b/drivers/video/backlight/led_bl.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * Based on pwm_bl.c
+ */
+
+#include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+struct led_bl_data {
+ struct device *dev;
+ struct backlight_device *bl_dev;
+ struct led_classdev **leds;
+ bool enabled;
+ int nb_leds;
+ unsigned int *levels;
+ unsigned int default_brightness;
+ unsigned int max_brightness;
+};
+
+static void led_bl_set_brightness(struct led_bl_data *priv, int level)
+{
+ int i;
+ int bkl_brightness;
+
+ if (priv->levels)
+ bkl_brightness = priv->levels[level];
+ else
+ bkl_brightness = level;
+
+ for (i = 0; i < priv->nb_leds; i++)
+ led_set_brightness(priv->leds[i], bkl_brightness);
+
+ priv->enabled = true;
+}
+
+static void led_bl_power_off(struct led_bl_data *priv)
+{
+ int i;
+
+ if (!priv->enabled)
+ return;
+
+ for (i = 0; i < priv->nb_leds; i++)
+ led_set_brightness(priv->leds[i], LED_OFF);
+
+ priv->enabled = false;
+}
+
+static int led_bl_update_status(struct backlight_device *bl)
+{
+ struct led_bl_data *priv = bl_get_data(bl);
+ int brightness = bl->props.brightness;
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & BL_CORE_FBBLANK)
+ brightness = 0;
+
+ if (brightness > 0)
+ led_bl_set_brightness(priv, brightness);
+ else
+ led_bl_power_off(priv);
+
+ return 0;
+}
+
+static const struct backlight_ops led_bl_ops = {
+ .update_status = led_bl_update_status,
+};
+
+static int led_bl_get_leds(struct device *dev,
+ struct led_bl_data *priv)
+{
+ int i, nb_leds, ret;
+ struct device_node *node = dev->of_node;
+ struct led_classdev **leds;
+ unsigned int max_brightness;
+ unsigned int default_brightness;
+
+ ret = of_count_phandle_with_args(node, "leds", NULL);
+ if (ret < 0) {
+ dev_err(dev, "Unable to get led count\n");
+ return -EINVAL;
+ }
+
+ nb_leds = ret;
+ if (nb_leds < 1) {
+ dev_err(dev, "At least one LED must be specified!\n");
+ return -EINVAL;
+ }
+
+ leds = devm_kzalloc(dev, sizeof(struct led_classdev *) * nb_leds,
+ GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ for (i = 0; i < nb_leds; i++) {
+ leds[i] = devm_of_led_get(dev, i);
+ if (IS_ERR(leds[i]))
+ return PTR_ERR(leds[i]);
+ }
+
+ /* check that the LEDs all have the same brightness range */
+ max_brightness = leds[0]->max_brightness;
+ for (i = 1; i < nb_leds; i++) {
+ if (max_brightness != leds[i]->max_brightness) {
+ dev_err(dev, "LEDs must have identical ranges\n");
+ return -EINVAL;
+ }
+ }
+
+ /* get the default brightness from the first LED from the list */
+ default_brightness = leds[0]->brightness;
+
+ priv->nb_leds = nb_leds;
+ priv->leds = leds;
+ priv->max_brightness = max_brightness;
+ priv->default_brightness = default_brightness;
+
+ return 0;
+}
+
+static int led_bl_parse_levels(struct device *dev,
+ struct led_bl_data *priv)
+{
+ struct device_node *node = dev->of_node;
+ int num_levels;
+ u32 value;
+ int ret;
+
+ if (!node)
+ return -ENODEV;
+
+ num_levels = of_property_count_u32_elems(node, "brightness-levels");
+ if (num_levels > 1) {
+ int i;
+ unsigned int db;
+ u32 *levels = NULL;
+
+ levels = devm_kzalloc(dev, sizeof(u32) * num_levels,
+ GFP_KERNEL);
+ if (!levels)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(node, "brightness-levels",
+ levels,
+ num_levels);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Try to map actual LED brightness to backlight brightness
+ * level
+ */
+ db = priv->default_brightness;
+ for (i = 0 ; i < num_levels; i++) {
+ if ((i && db > levels[i-1]) && db <= levels[i])
+ break;
+ }
+ priv->default_brightness = i;
+ priv->max_brightness = num_levels - 1;
+ priv->levels = levels;
+ } else if (num_levels >= 0)
+ dev_warn(dev, "Not enough levels defined\n");
+
+ ret = of_property_read_u32(node, "default-brightness-level", &value);
+ if (!ret && value <= priv->max_brightness)
+ priv->default_brightness = value;
+ else if (!ret && value > priv->max_brightness)
+ dev_warn(dev, "Invalid default brightness. Ignoring it\n");
+
+ return 0;
+}
+
+static int led_bl_probe(struct platform_device *pdev)
+{
+ struct backlight_properties props;
+ struct led_bl_data *priv;
+ int ret, i;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->dev = &pdev->dev;
+
+ ret = led_bl_get_leds(&pdev->dev, priv);
+ if (ret)
+ return ret;
+
+ ret = led_bl_parse_levels(&pdev->dev, priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to parse DT data\n");
+ return ret;
+ }
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = priv->max_brightness;
+ props.brightness = priv->default_brightness;
+ props.power = (priv->default_brightness > 0) ? FB_BLANK_POWERDOWN :
+ FB_BLANK_UNBLANK;
+ priv->bl_dev = backlight_device_register(dev_name(&pdev->dev),
+ &pdev->dev, priv, &led_bl_ops, &props);
+ if (IS_ERR(priv->bl_dev)) {
+ dev_err(&pdev->dev, "Failed to register backlight\n");
+ return PTR_ERR(priv->bl_dev);
+ }
+
+ for (i = 0; i < priv->nb_leds; i++)
+ led_sysfs_disable(priv->leds[i]);
+
+ backlight_update_status(priv->bl_dev);
+
+ return 0;
+}
+
+static int led_bl_remove(struct platform_device *pdev)
+{
+ struct led_bl_data *priv = platform_get_drvdata(pdev);
+ struct backlight_device *bl = priv->bl_dev;
+ int i;
+
+ backlight_device_unregister(bl);
+
+ led_bl_power_off(priv);
+ for (i = 0; i < priv->nb_leds; i++)
+ led_sysfs_enable(priv->leds[i]);
+
+ return 0;
+}
+
+static const struct of_device_id led_bl_of_match[] = {
+ { .compatible = "led-backlight" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, led_bl_of_match);
+
+static struct platform_driver led_bl_driver = {
+ .driver = {
+ .name = "led-backlight",
+ .of_match_table = of_match_ptr(led_bl_of_match),
+ },
+ .probe = led_bl_probe,
+ .remove = led_bl_remove,
+};
+
+module_platform_driver(led_bl_driver);
+
+MODULE_DESCRIPTION("LED based Backlight Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:led-backlight");
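Editor's note: led_bl_parse_levels() above maps the first LED's current hardware
brightness back to an index in the optional brightness-levels table. That mapping
loop in isolation, as a standalone program (the sample table and brightness value
are made up):

#include <stdio.h>

int main(void)
{
	unsigned int levels[] = { 0, 4, 16, 64, 255 }; /* sample table */
	int num_levels = 5;
	unsigned int db = 20; /* pretend current LED brightness */
	int i;

	/* pick the first level whose range contains the LED brightness */
	for (i = 0; i < num_levels; i++) {
		if ((i && db > levels[i - 1]) && db <= levels[i])
			break;
	}

	printf("default backlight level: %d (hw brightness %u)\n", i, db);
	return 0;
}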
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index de7b8382aba9..998b0de1812f 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1316,6 +1316,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
static int vgacon_resize(struct vc_data *c, unsigned int width,
unsigned int height, unsigned int user)
{
+ if ((width << 1) * height > vga_vram_size)
+ return -EINVAL;
+
if (width % 2 || width > screen_info.orig_video_cols ||
height > (screen_info.orig_video_lines * vga_default_font_height)/
c->vc_font.height)
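Editor's note: the new bounds check works because each character cell in VGA text
mode occupies two bytes (glyph plus attribute), so a width x height console needs
width * 2 * height bytes of VRAM. The arithmetic, spelled out with an assumed
32 KiB text-mode window:

#include <stdio.h>

int main(void)
{
	unsigned int width = 80, height = 50;
	unsigned int vga_vram_size = 0x8000; /* 32 KiB, a common text-mode window */

	/* 2 bytes per cell: character byte + attribute byte */
	unsigned int needed = (width << 1) * height;

	printf("need %u bytes, have %u: %s\n", needed, vga_vram_size,
	       needed > vga_vram_size ? "reject resize" : "ok");
	return 0;
}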
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 7bfe365d9372..341458fd95ca 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -959,8 +959,8 @@ out_iput:
iput(vb->vb_dev_info.inode);
out_kern_unmount:
kern_unmount(balloon_mnt);
-#endif
out_del_vqs:
+#endif
vdev->config->del_vqs(vdev);
out_free_vb:
kfree(vb);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 867c7ebd3f10..58b96baa8d48 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -2203,10 +2203,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
vq->split.queue_size_in_bytes,
vq->split.vring.desc,
vq->split.queue_dma_addr);
-
- kfree(vq->split.desc_state);
}
}
+ if (!vq->packed_ring)
+ kfree(vq->split.desc_state);
list_del(&_vq->list);
kfree(vq);
}
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index b069349b52f5..3065dd670a18 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -54,6 +54,13 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+#define WDAT_DEFAULT_TIMEOUT 30
+
+static int timeout = WDAT_DEFAULT_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
+ __MODULE_STRING(WDAT_DEFAULT_TIMEOUT) ")");
+
static int wdat_wdt_read(struct wdat_wdt *wdat,
const struct wdat_instruction *instr, u32 *value)
{
@@ -389,7 +396,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
memset(&r, 0, sizeof(r));
r.start = gas->address;
- r.end = r.start + gas->access_width - 1;
+ r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
r.flags = IORESOURCE_MEM;
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
@@ -438,6 +445,22 @@ static int wdat_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wdat);
+ /*
+ * Set initial timeout so that userspace has time to configure the
+ * watchdog properly after it has opened the device. In some cases
+ * the BIOS default is too short and causes immediate reboot.
+ */
+ if (timeout * 1000 < wdat->wdd.min_hw_heartbeat_ms ||
+ timeout * 1000 > wdat->wdd.max_hw_heartbeat_ms) {
+ dev_warn(dev, "Invalid timeout %d given, using %d\n",
+ timeout, WDAT_DEFAULT_TIMEOUT);
+ timeout = WDAT_DEFAULT_TIMEOUT;
+ }
+
+ ret = wdat_wdt_set_timeout(&wdat->wdd, timeout);
+ if (ret)
+ return ret;
+
watchdog_set_nowayout(&wdat->wdd, nowayout);
return devm_watchdog_register_device(dev, &wdat->wdd);
}
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index ce1077e32466..7c95516a860f 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -52,7 +52,7 @@ struct xen_pcibk_dev_data {
unsigned int ack_intr:1; /* .. and ACK-ing */
unsigned long handled;
unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
- char irq_name[0]; /* xen-pcibk[000:04:00.0] */
+ char irq_name[]; /* xen-pcibk[000:04:00.0] */
};
/* Used by XenBus and xen_pcibk_ops.c */
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index d239fc3c5e3d..eb5151fc8efa 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -313,6 +313,8 @@ static int process_msg(void)
req->msg.type = state.msg.type;
req->msg.len = state.msg.len;
req->body = state.body;
+ /* write body, then update state */
+ virt_wmb();
req->state = xb_req_state_got_reply;
req->cb(req);
} else
@@ -395,6 +397,8 @@ static int process_writes(void)
if (state.req->state == xb_req_state_aborted)
kfree(state.req);
else {
+ /* write err, then update state */
+ virt_wmb();
state.req->state = xb_req_state_got_reply;
wake_up(&state.req->wq);
}
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 66975da4f3b6..8c4d05b687b7 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -239,9 +239,9 @@ int xenbus_dev_probe(struct device *_dev)
goto fail;
}
- spin_lock(&dev->reclaim_lock);
+ down(&dev->reclaim_sem);
err = drv->probe(dev, id);
- spin_unlock(&dev->reclaim_lock);
+ up(&dev->reclaim_sem);
if (err)
goto fail_put;
@@ -271,9 +271,9 @@ int xenbus_dev_remove(struct device *_dev)
free_otherend_watch(dev);
if (drv->remove) {
- spin_lock(&dev->reclaim_lock);
+ down(&dev->reclaim_sem);
drv->remove(dev);
- spin_unlock(&dev->reclaim_lock);
+ up(&dev->reclaim_sem);
}
module_put(drv->driver.owner);
@@ -473,7 +473,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
goto fail;
dev_set_name(&xendev->dev, "%s", devname);
- spin_lock_init(&xendev->reclaim_lock);
+ sema_init(&xendev->reclaim_sem, 1);
/* Register with generic device framework. */
err = device_register(&xendev->dev);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 791f6fe01e91..9b2fbe69bccc 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -45,6 +45,7 @@
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/export.h>
+#include <linux/semaphore.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -257,10 +258,10 @@ static int backend_reclaim_memory(struct device *dev, void *data)
drv = to_xenbus_driver(dev->driver);
if (drv && drv->reclaim_memory) {
xdev = to_xenbus_device(dev);
- if (!spin_trylock(&xdev->reclaim_lock))
+ if (down_trylock(&xdev->reclaim_sem))
return 0;
drv->reclaim_memory(xdev);
- spin_unlock(&xdev->reclaim_lock);
+ up(&xdev->reclaim_sem);
}
return 0;
}
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index ddc18da61834..3a06eb699f33 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -191,8 +191,11 @@ static bool xenbus_ok(void)
static bool test_reply(struct xb_req_data *req)
{
- if (req->state == xb_req_state_got_reply || !xenbus_ok())
+ if (req->state == xb_req_state_got_reply || !xenbus_ok()) {
+ /* read req->state before all other fields */
+ virt_rmb();
return true;
+ }
/* Make sure to reread req->state each time. */
barrier();
@@ -202,7 +205,7 @@ static bool test_reply(struct xb_req_data *req)
static void *read_reply(struct xb_req_data *req)
{
- while (req->state != xb_req_state_got_reply) {
+ do {
wait_event(req->wq, test_reply(req));
if (!xenbus_ok())
@@ -216,7 +219,7 @@ static void *read_reply(struct xb_req_data *req)
if (req->err)
return ERR_PTR(req->err);
- }
+ } while (req->state != xb_req_state_got_reply);
return req->body;
}
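Editor's note: the virt_wmb()/virt_rmb() pair added above is the classic publish
pattern: the producer fills in the reply body, issues a write barrier, then sets
the state, and the consumer reads the state, issues a read barrier, then reads
the body. In portable C11 the same ordering is expressed with release/acquire
atomics; a minimal two-thread sketch (compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int body;         /* plain payload */
static atomic_int state; /* 0 = pending, 1 = got_reply */

static void *producer(void *arg)
{
	body = 1234;                                            /* write body... */
	atomic_store_explicit(&state, 1, memory_order_release); /* ...then publish */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);

	/* spin until the state flips; acquire pairs with the release above */
	while (atomic_load_explicit(&state, memory_order_acquire) != 1)
		;

	printf("body = %d\n", body); /* guaranteed to observe 1234 */
	pthread_join(t, NULL);
	return 0;
}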
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 1ccb3f8d528d..27076ebadb36 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7783,6 +7783,7 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
{
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
+ u16 csum_size;
blk_status_t ret;
/*
@@ -7802,7 +7803,8 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
file_offset -= dip->logical_offset;
file_offset >>= inode->i_sb->s_blocksize_bits;
- io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
+ csum_size = btrfs_super_csum_size(btrfs_sb(inode->i_sb)->super_copy);
+ io_bio->csum = orig_io_bio->csum + csum_size * file_offset;
return 0;
}
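Editor's note: the btrfs fix above replaces pointer arithmetic that hard-coded
4-byte (crc32c) checksums with byte arithmetic scaled by the superblock's actual
checksum size, which matters now that e.g. sha256 stores 32 bytes per block. The
two indexing schemes side by side (the sizes here are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t csums[128] = { 0 };
	uint16_t csum_size = 32; /* e.g. sha256; crc32c would be 4 */
	long file_offset = 2;    /* block index within the bio */

	/* old, wrong for csum_size != 4: always strides 4 bytes */
	uint8_t *old = (uint8_t *)(((uint32_t *)csums) + file_offset);

	/* new: stride by the real per-block checksum size */
	uint8_t *new = csums + (size_t)csum_size * file_offset;

	printf("old offset %td, new offset %td\n", old - csums, new - csums);
	return 0;
}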
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 606f26d862dc..cc3ada12848d 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -324,6 +324,8 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
if (full_path == NULL)
goto cdda_exit;
+ convert_delimiter(full_path, '\\');
+
cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
if (!cifs_sb_master_tlink(cifs_sb)) {
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 46ebaf3f0824..fa77fe5258b0 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -530,6 +530,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
if (tcon->seal)
seq_puts(s, ",seal");
+ else if (tcon->ses->server->ignore_signature)
+ seq_puts(s, ",signloosely");
if (tcon->nocase)
seq_puts(s, ",nocase");
if (tcon->local_lease)
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index de82cfa44b1a..0d956360e984 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1281,6 +1281,7 @@ struct cifs_fid {
__u64 volatile_fid; /* volatile file id for smb2 */
__u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for smb2 */
__u8 create_guid[16];
+ __u32 access;
struct cifs_pending_open *pending_open;
unsigned int epoch;
#ifdef CONFIG_CIFS_DEBUG2
@@ -1741,6 +1742,12 @@ static inline bool is_retryable_error(int error)
return false;
}
+
+/* cifs_get_writable_file() flags */
+#define FIND_WR_ANY 0
+#define FIND_WR_FSUID_ONLY 1
+#define FIND_WR_WITH_DELETE 2
+
#define MID_FREE 0
#define MID_REQUEST_ALLOCATED 1
#define MID_REQUEST_SUBMITTED 2
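Editor's note: replacing the bool fsuid_only parameter with an int of OR-able
FIND_WR_* flags lets callers combine independent constraints without widening
every signature again. The general flags-argument pattern in miniature (flag
names reused from the hunk, the function body simplified to a printout):

#include <stdbool.h>
#include <stdio.h>

#define FIND_WR_ANY         0
#define FIND_WR_FSUID_ONLY  1
#define FIND_WR_WITH_DELETE 2

static void find_writable(int flags)
{
	bool fsuid_only  = flags & FIND_WR_FSUID_ONLY;
	bool with_delete = flags & FIND_WR_WITH_DELETE;

	printf("fsuid_only=%d with_delete=%d\n", fsuid_only, with_delete);
}

int main(void)
{
	find_writable(FIND_WR_ANY);
	find_writable(FIND_WR_FSUID_ONLY | FIND_WR_WITH_DELETE); /* combinable */
	return 0;
}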
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 89eaaf46d1ca..e5cb681ec138 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -134,11 +134,12 @@ extern bool backup_cred(struct cifs_sb_info *);
extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written);
-extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
+extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
- bool fsuid_only,
+ int flags,
struct cifsFileInfo **ret_file);
extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
+ int flags,
struct cifsFileInfo **ret_file);
extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 3c89569e7210..6f6fb3606a5d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1492,6 +1492,7 @@ openRetry:
*oplock = rsp->OplockLevel;
/* cifs fid stays in le */
oparms->fid->netfid = rsp->Fid;
+ oparms->fid->access = desired_access;
/* Let caller know file was created so we can set the mode. */
/* Do we care about the CreateAction in any other cases? */
@@ -2115,7 +2116,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
wdata2->tailsz = tailsz;
wdata2->bytes = cur_len;
- rc = cifs_get_writable_file(CIFS_I(inode), false,
+ rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
&wdata2->cfile);
if (!wdata2->cfile) {
cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 0ef099442f20..36e7b2fd2190 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -555,7 +555,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
if (server->ops->close)
server->ops->close(xid, tcon, &fid);
cifs_del_pending_open(&open);
- fput(file);
rc = -ENOMEM;
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index bc9516ab4b34..3b942ecdd4be 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1958,7 +1958,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
/* Return -EBADF if no handle is found and general rc otherwise */
int
-cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
+cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
struct cifsFileInfo **ret_file)
{
struct cifsFileInfo *open_file, *inv_file = NULL;
@@ -1966,7 +1966,8 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
bool any_available = false;
int rc = -EBADF;
unsigned int refind = 0;
-
+ bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
+ bool with_delete = flags & FIND_WR_WITH_DELETE;
*ret_file = NULL;
/*
@@ -1998,6 +1999,8 @@ refind_writable:
continue;
if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
continue;
+ if (with_delete && !(open_file->fid.access & DELETE))
+ continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
if (!open_file->invalidHandle) {
/* found a good writable file */
@@ -2045,12 +2048,12 @@ refind_writable:
}
struct cifsFileInfo *
-find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
+find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
{
struct cifsFileInfo *cfile;
int rc;
- rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
+ rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
if (rc)
cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
@@ -2059,6 +2062,7 @@ find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
int
cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
+ int flags,
struct cifsFileInfo **ret_file)
{
struct list_head *tmp;
@@ -2085,7 +2089,7 @@ cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
kfree(full_path);
cinode = CIFS_I(d_inode(cfile->dentry));
spin_unlock(&tcon->open_file_lock);
- return cifs_get_writable_file(cinode, 0, ret_file);
+ return cifs_get_writable_file(cinode, flags, ret_file);
}
spin_unlock(&tcon->open_file_lock);
@@ -2162,7 +2166,8 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
if (mapping->host->i_size - offset < (loff_t)to)
to = (unsigned)(mapping->host->i_size - offset);
- rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
+ rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
+ &open_file);
if (!rc) {
bytes_written = cifs_write(open_file, open_file->pid,
write_data, to - from, &offset);
@@ -2355,7 +2360,7 @@ retry:
if (cfile)
cifsFileInfo_put(cfile);
- rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);
+ rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
/* in case of an error store it to return later */
if (rc)
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index b5e6635c578e..1e8a4b1579db 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -653,8 +653,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
*/
if ((fattr->cf_nlink < 1) && !tcon->unix_ext &&
!info->DeletePending) {
- cifs_dbg(1, "bogus file nlink value %u\n",
- fattr->cf_nlink);
+ cifs_dbg(VFS, "bogus file nlink value %u\n",
+ fattr->cf_nlink);
fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
}
}
@@ -2073,6 +2073,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
struct inode *inode = d_inode(dentry);
struct super_block *sb = dentry->d_sb;
char *full_path = NULL;
+ int count = 0;
if (inode == NULL)
return -ENOENT;
@@ -2094,15 +2095,18 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
full_path, inode, inode->i_count.counter,
dentry, cifs_get_time(dentry), jiffies);
+again:
if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
else
rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
xid, NULL);
-
+ if (rc == -EAGAIN && count++ < 10)
+ goto again;
out:
kfree(full_path);
free_xid(xid);
+
return rc;
}
@@ -2278,7 +2282,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
* writebehind data than the SMB timeout for the SetPathInfo
* request would allow
*/
- open_file = find_writable_file(cifsInode, true);
+ open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
if (open_file) {
tcon = tlink_tcon(open_file->tlink);
server = tcon->ses->server;
@@ -2428,7 +2432,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
args->ctime = NO_CHANGE_64;
args->device = 0;
- open_file = find_writable_file(cifsInode, true);
+ open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
if (open_file) {
u16 nfid = open_file->fid.netfid;
u32 npid = open_file->pid;
@@ -2531,7 +2535,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
rc = 0;
if (attrs->ia_valid & ATTR_MTIME) {
- rc = cifs_get_writable_file(cifsInode, false, &wfile);
+ rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile);
if (!rc) {
tcon = tlink_tcon(wfile->tlink);
rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index eb994e313c6a..b130efaf8feb 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -766,7 +766,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
struct cifs_tcon *tcon;
/* if the file is already open for write, just use that fileid */
- open_file = find_writable_file(cinode, true);
+ open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
if (open_file) {
fid.netfid = open_file->fid.netfid;
netpid = open_file->pid;
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 1cf207564ff9..a8c301ae00ed 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -521,7 +521,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
cifs_i = CIFS_I(inode);
dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
data.Attributes = cpu_to_le32(dosattrs);
- cifs_get_writable_path(tcon, name, &cfile);
+ cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile);
tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
FILE_WRITE_ATTRIBUTES, FILE_CREATE,
CREATE_NOT_FILE, ACL_NO_MODE,
@@ -577,7 +577,7 @@ smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
{
struct cifsFileInfo *cfile;
- cifs_get_writable_path(tcon, from_name, &cfile);
+ cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
return smb2_set_path_attr(xid, tcon, from_name, to_name,
cifs_sb, DELETE, SMB2_OP_RENAME, cfile);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index e47190cae163..c31e84ee3c39 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1364,6 +1364,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
cfile->fid.persistent_fid = fid->persistent_fid;
cfile->fid.volatile_fid = fid->volatile_fid;
+ cfile->fid.access = fid->access;
#ifdef CONFIG_CIFS_DEBUG2
cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
@@ -3327,7 +3328,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs
* some servers (Windows2016) will not reflect recent writes in
* QUERY_ALLOCATED_RANGES until SMB2_flush is called.
*/
- wrcfile = find_writable_file(cifsi, false);
+ wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
if (wrcfile) {
filemap_write_and_wait(inode->i_mapping);
smb2_flush_file(xid, tcon, &wrcfile->fid);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 1234f9ccab03..28c0be5e69b7 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2771,6 +2771,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
atomic_inc(&tcon->num_remote_opens);
oparms->fid->persistent_fid = rsp->PersistentFileId;
oparms->fid->volatile_fid = rsp->VolatileFileId;
+ oparms->fid->access = oparms->desired_access;
#ifdef CONFIG_CIFS_DEBUG2
oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index 65cb09fa6ead..08c9f216a54d 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -539,6 +539,15 @@ int fscrypt_drop_inode(struct inode *inode)
mk = ci->ci_master_key->payload.data[0];
/*
+ * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes
+ * protected by the key were cleaned by sync_filesystem(). But if
+ * userspace is still using the files, inodes can be dirtied between
+ * then and now. We mustn't lose any writes, so skip dirty inodes here.
+ */
+ if (inode->i_state & I_DIRTY_ALL)
+ return 0;
+
+ /*
* Note: since we aren't holding ->mk_secret_sem, the result here can
* immediately become outdated. But there's no correctness problem with
* unnecessarily evicting. Nor is there a correctness problem with not
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 634b09d18b77..db987b5110a9 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -1090,21 +1090,12 @@ static const struct file_operations fops_regset32 = {
* This function creates a file in debugfs with the given name that reports
* the names and values of a set of 32-bit registers. If the @mode variable
* is so set it can be read from. Writing is not supported.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
- * be returned.
*/
-struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
- struct dentry *parent,
- struct debugfs_regset32 *regset)
+void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset)
{
- return debugfs_create_file(name, mode, parent, regset, &fops_regset32);
+ debugfs_create_file(name, mode, parent, regset, &fops_regset32);
}
EXPORT_SYMBOL_GPL(debugfs_create_regset32);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ff1b764b0c0e..0c7c4adb664e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2391,7 +2391,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct flex_groups **old_groups, **new_groups;
- int size, i;
+ int size, i, j;
if (!sbi->s_log_groups_per_flex)
return 0;
@@ -2412,8 +2412,8 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
sizeof(struct flex_groups)),
GFP_KERNEL);
if (!new_groups[i]) {
- for (i--; i >= sbi->s_flex_groups_allocated; i--)
- kvfree(new_groups[i]);
+ for (j = sbi->s_flex_groups_allocated; j < i; j++)
+ kvfree(new_groups[j]);
kvfree(new_groups);
ext4_msg(sb, KERN_ERR,
"not enough memory for %d flex groups", size);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 594b05ae16c9..71946da84388 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -750,6 +750,13 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
return NULL;
init_rwsem(&ei->truncate_lock);
+	/* Zeroing to allow iput() even on a partially initialized inode. */
+ ei->mmu_private = 0;
+ ei->i_start = 0;
+ ei->i_logstart = 0;
+ ei->i_attrs = 0;
+ ei->i_pos = 0;
+
return &ei->vfs_inode;
}
@@ -1374,16 +1381,6 @@ out:
return 0;
}
-static void fat_dummy_inode_init(struct inode *inode)
-{
- /* Initialize this dummy inode to work as no-op. */
- MSDOS_I(inode)->mmu_private = 0;
- MSDOS_I(inode)->i_start = 0;
- MSDOS_I(inode)->i_logstart = 0;
- MSDOS_I(inode)->i_attrs = 0;
- MSDOS_I(inode)->i_pos = 0;
-}
-
static int fat_read_root(struct inode *inode)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
@@ -1844,13 +1841,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
fat_inode = new_inode(sb);
if (!fat_inode)
goto out_fail;
- fat_dummy_inode_init(fat_inode);
sbi->fat_inode = fat_inode;
fsinfo_inode = new_inode(sb);
if (!fsinfo_inode)
goto out_fail;
- fat_dummy_inode_init(fsinfo_inode);
fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
sbi->fsinfo_inode = fsinfo_inode;
insert_inode_hash(fsinfo_inode);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 9bc167562ee8..2e4c0fa2074b 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -735,8 +735,9 @@ static void send_sigio_to_task(struct task_struct *p,
return;
switch (signum) {
- kernel_siginfo_t si;
- default:
+ default: {
+ kernel_siginfo_t si;
+
/* Queue a rt signal with the appropriate fd as its
value. We use SI_SIGIO as the source, not
SI_KERNEL, since kernel signals always get
@@ -769,6 +770,7 @@ static void send_sigio_to_task(struct task_struct *p,
si.si_fd = fd;
if (!do_send_sig_info(signum, &si, p, type))
break;
+ }
/* fall-through - fall back on the old plain SIGIO signal */
case 0:
do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
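Editor's note: the fcntl.c change fixes a long-standing oddity: a declaration
placed between switch () { and the first case label is never reached by any jump
into the block, and jumping over a declaration is exactly what newer compilers
warn about. Braces around the case body give the variable a proper scope; a
condensed illustration:

#include <stdio.h>

int main(void)
{
	int signum = 7;

	switch (signum) {
	default: { /* braces open a scope for the local */
		int si = signum * 2;

		printf("queued rt signal payload %d\n", si);
		break;
	}
	case 0:
		printf("plain SIGIO fallback\n");
		break;
	}
	return 0;
}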
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 2716d56ed0a0..8294851a9dd9 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1248,7 +1248,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
if (!(file->f_mode & FMODE_OPENED))
return finish_no_open(file, d);
dput(d);
- return 0;
+ return excl && (flags & O_CREAT) ? -EEXIST : 0;
}
BUG_ON(d != NULL);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 0a5ab1a8f69a..5cef075c0b37 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -502,7 +502,7 @@ next:
if (worker->mm)
work->flags |= IO_WQ_WORK_HAS_MM;
- if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) {
+ if (wq->get_work) {
put_work = work;
wq->get_work(work);
}
@@ -535,42 +535,23 @@ next:
} while (1);
}
-static inline void io_worker_spin_for_work(struct io_wqe *wqe)
-{
- int i = 0;
-
- while (++i < 1000) {
- if (io_wqe_run_queue(wqe))
- break;
- if (need_resched())
- break;
- cpu_relax();
- }
-}
-
static int io_wqe_worker(void *data)
{
struct io_worker *worker = data;
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
- bool did_work;
io_worker_start(wqe, worker);
- did_work = false;
while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
set_current_state(TASK_INTERRUPTIBLE);
loop:
- if (did_work)
- io_worker_spin_for_work(wqe);
spin_lock_irq(&wqe->lock);
if (io_wqe_run_queue(wqe)) {
__set_current_state(TASK_RUNNING);
io_worker_handle_work(worker);
- did_work = true;
goto loop;
}
- did_work = false;
/* drops the lock on success, retry */
if (__io_worker_idle(wqe, worker)) {
__release(&wqe->lock);
@@ -766,6 +747,17 @@ static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
return true;
}
+static void io_run_cancel(struct io_wq_work *work)
+{
+ do {
+ struct io_wq_work *old_work = work;
+
+ work->flags |= IO_WQ_WORK_CANCEL;
+ work->func(&work);
+ work = (work == old_work) ? NULL : work;
+ } while (work);
+}
+
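The helper above loops because the cancelled callback may hand back a linked
work item through the double pointer; the loop terminates once the callback
stops replacing the item. A self-contained sketch of that control flow
(hypothetical types, not the io-wq structures):

#include <stdio.h>

struct work {
	unsigned flags;
	struct work *link;
	void (*func)(struct work **);
};

#define WORK_CANCEL 1u

static void cancel_fn(struct work **wp)
{
	printf("cancelled, flags=%u\n", (*wp)->flags);
	if ((*wp)->link)
		*wp = (*wp)->link;	/* hand back the linked item */
}

static void run_cancel(struct work *w)
{
	do {
		struct work *old = w;

		w->flags |= WORK_CANCEL;
		w->func(&w);			/* may advance to linked work */
		w = (w == old) ? NULL : w;	/* stop when nothing new returned */
	} while (w);
}

int main(void)
{
	struct work second = { 0, NULL, cancel_fn };
	struct work first = { 0, &second, cancel_fn };

	run_cancel(&first);	/* runs first, then its linked second */
	return 0;
}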
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
@@ -779,8 +771,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
* It's close enough to not be an issue, fork() has the same delay.
*/
if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
- work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ io_run_cancel(work);
return;
}
@@ -919,8 +910,7 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
spin_unlock_irqrestore(&wqe->lock, flags);
if (found) {
- work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ io_run_cancel(work);
return IO_WQ_CANCEL_OK;
}
@@ -995,8 +985,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
spin_unlock_irqrestore(&wqe->lock, flags);
if (found) {
- work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ io_run_cancel(work);
return IO_WQ_CANCEL_OK;
}
@@ -1068,42 +1057,6 @@ enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
return ret;
}
-struct io_wq_flush_data {
- struct io_wq_work work;
- struct completion done;
-};
-
-static void io_wq_flush_func(struct io_wq_work **workptr)
-{
- struct io_wq_work *work = *workptr;
- struct io_wq_flush_data *data;
-
- data = container_of(work, struct io_wq_flush_data, work);
- complete(&data->done);
-}
-
-/*
- * Doesn't wait for previously queued work to finish. When this completes,
- * it just means that previously queued work was started.
- */
-void io_wq_flush(struct io_wq *wq)
-{
- struct io_wq_flush_data data;
- int node;
-
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
-
- if (!node_online(node))
- continue;
- init_completion(&data.done);
- INIT_IO_WORK(&data.work, io_wq_flush_func);
- data.work.flags |= IO_WQ_WORK_INTERNAL;
- io_wqe_enqueue(wqe, &data.work);
- wait_for_completion(&data.done);
- }
-}
-
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
int ret = -ENOMEM, node;
diff --git a/fs/io-wq.h b/fs/io-wq.h
index ccc7d84af57d..e5e15f2c93ec 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -8,7 +8,6 @@ enum {
IO_WQ_WORK_HAS_MM = 2,
IO_WQ_WORK_HASHED = 4,
IO_WQ_WORK_UNBOUND = 32,
- IO_WQ_WORK_INTERNAL = 64,
IO_WQ_WORK_CB = 128,
IO_WQ_WORK_NO_CANCEL = 256,
IO_WQ_WORK_CONCURRENT = 512,
@@ -79,16 +78,10 @@ struct io_wq_work {
pid_t task_pid;
};
-#define INIT_IO_WORK(work, _func) \
- do { \
- (work)->list.next = NULL; \
- (work)->func = _func; \
- (work)->files = NULL; \
- (work)->mm = NULL; \
- (work)->creds = NULL; \
- (work)->fs = NULL; \
- (work)->flags = 0; \
- } while (0) \
+#define INIT_IO_WORK(work, _func) \
+ do { \
+ *(work) = (struct io_wq_work){ .func = _func }; \
+ } while (0) \
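The rewritten macro relies on C's guarantee that members omitted from an
initializer are zero-initialized, so one compound-literal assignment replaces
the field-by-field reset. A standalone sketch of the idiom (hypothetical
struct):

#include <assert.h>
#include <stddef.h>

struct job {
	void *next;
	void (*fn)(void);
	unsigned flags;
};

#define INIT_JOB(job, _fn) \
	do { \
		*(job) = (struct job){ .fn = _fn }; \
	} while (0)

static void noop(void) { }

int main(void)
{
	struct job j = { .next = &j, .flags = 0xff };

	INIT_JOB(&j, noop);
	assert(j.next == NULL && j.flags == 0);	/* everything else zeroed */
	assert(j.fn == noop);
	return 0;
}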
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);
@@ -106,7 +99,6 @@ void io_wq_destroy(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
-void io_wq_flush(struct io_wq *wq);
void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index de650df9ac53..c06082bb039a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -183,19 +183,15 @@ struct fixed_file_table {
struct file **files;
};
-enum {
- FFD_F_ATOMIC,
-};
-
struct fixed_file_data {
struct fixed_file_table *table;
struct io_ring_ctx *ctx;
struct percpu_ref refs;
struct llist_head put_llist;
- unsigned long state;
struct work_struct ref_work;
struct completion done;
+ struct rcu_head rcu;
};
struct io_ring_ctx {
@@ -1004,6 +1000,7 @@ static void io_kill_timeout(struct io_kiocb *req)
if (ret != -1) {
atomic_inc(&req->ctx->cq_timeouts);
list_del_init(&req->list);
+ req->flags |= REQ_F_COMP_LOCKED;
io_cqring_fill_event(req, 0);
io_put_req(req);
}
@@ -1483,10 +1480,10 @@ static void io_free_req(struct io_kiocb *req)
__attribute__((nonnull))
static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
- io_req_find_next(req, nxtptr);
-
- if (refcount_dec_and_test(&req->refs))
+ if (refcount_dec_and_test(&req->refs)) {
+ io_req_find_next(req, nxtptr);
__io_free_req(req);
+ }
}
static void io_put_req(struct io_kiocb *req)
@@ -1821,6 +1818,10 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
list_add(&req->list, &ctx->poll_list);
else
list_add_tail(&req->list, &ctx->poll_list);
+
+ if ((ctx->flags & IORING_SETUP_SQPOLL) &&
+ wq_has_sleeper(&ctx->sqo_wait))
+ wake_up(&ctx->sqo_wait);
}
static void io_file_put(struct io_submit_state *state)
@@ -2071,7 +2072,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
ssize_t ret;
ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
*iovec = NULL;
- return ret;
+ return ret < 0 ? ret : sqe_len;
}
if (req->io) {
@@ -3002,6 +3003,11 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
+#ifdef CONFIG_COMPAT
+ if (req->ctx->compat)
+ sr->msg_flags |= MSG_CMSG_COMPAT;
+#endif
+
if (!io || req->opcode == IORING_OP_SEND)
return 0;
/* iovec is already imported */
@@ -3154,6 +3160,11 @@ static int io_recvmsg_prep(struct io_kiocb *req,
sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
+#ifdef CONFIG_COMPAT
+ if (req->ctx->compat)
+ sr->msg_flags |= MSG_CMSG_COMPAT;
+#endif
+
if (!io || req->opcode == IORING_OP_RECV)
return 0;
/* iovec is already imported */
@@ -4705,11 +4716,21 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_kiocb *linked_timeout;
struct io_kiocb *nxt = NULL;
+ const struct cred *old_creds = NULL;
int ret;
again:
linked_timeout = io_prep_linked_timeout(req);
+ if (req->work.creds && req->work.creds != current_cred()) {
+ if (old_creds)
+ revert_creds(old_creds);
+ if (old_creds == req->work.creds)
+ old_creds = NULL; /* restored original creds */
+ else
+ old_creds = override_creds(req->work.creds);
+ }
+
ret = io_issue_sqe(req, sqe, &nxt, true);
/*
@@ -4735,7 +4756,7 @@ punt:
err:
/* drop submission reference */
- io_put_req(req);
+ io_put_req_find_next(req, &nxt);
if (linked_timeout) {
if (!ret)
@@ -4759,6 +4780,8 @@ done_req:
goto punt;
goto again;
}
+ if (old_creds)
+ revert_creds(old_creds);
}
static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -4803,7 +4826,6 @@ static inline void io_queue_link_head(struct io_kiocb *req)
static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_submit_state *state, struct io_kiocb **link)
{
- const struct cred *old_creds = NULL;
struct io_ring_ctx *ctx = req->ctx;
unsigned int sqe_flags;
int ret, id;
@@ -4818,14 +4840,12 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
id = READ_ONCE(sqe->personality);
if (id) {
- const struct cred *personality_creds;
-
- personality_creds = idr_find(&ctx->personality_idr, id);
- if (unlikely(!personality_creds)) {
+ req->work.creds = idr_find(&ctx->personality_idr, id);
+ if (unlikely(!req->work.creds)) {
ret = -EINVAL;
goto err_req;
}
- old_creds = override_creds(personality_creds);
+ get_cred(req->work.creds);
}
/* same numerical values with corresponding REQ_F_*, safe to copy */
@@ -4837,8 +4857,6 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
err_req:
io_cqring_add_event(req, ret);
io_double_put_req(req);
- if (old_creds)
- revert_creds(old_creds);
return false;
}
@@ -4899,8 +4917,6 @@ err_req:
}
}
- if (old_creds)
- revert_creds(old_creds);
return true;
}
@@ -5081,9 +5097,8 @@ static int io_sq_thread(void *data)
const struct cred *old_cred;
mm_segment_t old_fs;
DEFINE_WAIT(wait);
- unsigned inflight;
unsigned long timeout;
- int ret;
+ int ret = 0;
complete(&ctx->completions[1]);
@@ -5091,39 +5106,19 @@ static int io_sq_thread(void *data)
set_fs(USER_DS);
old_cred = override_creds(ctx->creds);
- ret = timeout = inflight = 0;
+ timeout = jiffies + ctx->sq_thread_idle;
while (!kthread_should_park()) {
unsigned int to_submit;
- if (inflight) {
+ if (!list_empty(&ctx->poll_list)) {
unsigned nr_events = 0;
- if (ctx->flags & IORING_SETUP_IOPOLL) {
- /*
- * inflight is the count of the maximum possible
- * entries we submitted, but it can be smaller
- * if we dropped some of them. If we don't have
- * poll entries available, then we know that we
- * have nothing left to poll for. Reset the
- * inflight count to zero in that case.
- */
- mutex_lock(&ctx->uring_lock);
- if (!list_empty(&ctx->poll_list))
- io_iopoll_getevents(ctx, &nr_events, 0);
- else
- inflight = 0;
- mutex_unlock(&ctx->uring_lock);
- } else {
- /*
- * Normal IO, just pretend everything completed.
- * We don't have to poll completions for that.
- */
- nr_events = inflight;
- }
-
- inflight -= nr_events;
- if (!inflight)
+ mutex_lock(&ctx->uring_lock);
+ if (!list_empty(&ctx->poll_list))
+ io_iopoll_getevents(ctx, &nr_events, 0);
+ else
timeout = jiffies + ctx->sq_thread_idle;
+ mutex_unlock(&ctx->uring_lock);
}
to_submit = io_sqring_entries(ctx);
@@ -5152,7 +5147,7 @@ static int io_sq_thread(void *data)
* more IO, we should wait for the application to
* reap events and wake us up.
*/
- if (inflight ||
+ if (!list_empty(&ctx->poll_list) ||
(!time_after(jiffies, timeout) && ret != -EBUSY &&
!percpu_ref_is_dying(&ctx->refs))) {
cond_resched();
@@ -5162,6 +5157,19 @@ static int io_sq_thread(void *data)
prepare_to_wait(&ctx->sqo_wait, &wait,
TASK_INTERRUPTIBLE);
+ /*
+ * While doing polled IO, before going to sleep, we need
+ * to check if there are new reqs added to poll_list;
+ * reqs may have been punted to the io worker and will
+ * be added to poll_list later, hence check poll_list
+ * again.
+ */
+ if ((ctx->flags & IORING_SETUP_IOPOLL) &&
+ !list_empty_careful(&ctx->poll_list)) {
+ finish_wait(&ctx->sqo_wait, &wait);
+ continue;
+ }
+
/* Tell userspace we may need a wakeup call */
ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
/* make sure to read SQ tail after writing flags */
@@ -5189,8 +5197,7 @@ static int io_sq_thread(void *data)
mutex_lock(&ctx->uring_lock);
ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
mutex_unlock(&ctx->uring_lock);
- if (ret > 0)
- inflight += ret;
+ timeout = jiffies + ctx->sq_thread_idle;
}
set_fs(old_fs);
@@ -5324,6 +5331,26 @@ static void io_file_ref_kill(struct percpu_ref *ref)
complete(&data->done);
}
+static void __io_file_ref_exit_and_free(struct rcu_head *rcu)
+{
+ struct fixed_file_data *data = container_of(rcu, struct fixed_file_data,
+ rcu);
+ percpu_ref_exit(&data->refs);
+ kfree(data);
+}
+
+static void io_file_ref_exit_and_free(struct rcu_head *rcu)
+{
+ /*
+ * We need to order our exit+free call against the potentially
+ * existing call_rcu() for switching to atomic. One way to do that
+ * is to have this rcu callback queue the final put and free, as we
+ * could otherwise have a pre-existing atomic switch complete _after_
+ * the free callback we queued.
+ */
+ call_rcu(rcu, __io_file_ref_exit_and_free);
+}
+
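The comment above depends on callbacks in the same queue draining in order:
queueing the teardown through the queue, rather than running it directly,
orders it after any callback that was already pending. The trick generalizes;
a toy FIFO demonstrates the ordering (plain queue standing in for call_rcu):

#include <stdio.h>

typedef void (*cb_t)(void);

static cb_t queue[8];
static int qlen;

static void enqueue(cb_t cb) { queue[qlen++] = cb; }

static void drain(void)
{
	for (int i = 0; i < qlen; i++)	/* FIFO: earlier entries run first */
		queue[i]();
}

static void pending_switch(void) { puts("pre-existing callback"); }
static void final_free(void)     { puts("teardown (runs last)"); }

/* queue the teardown *behind* whatever is already pending */
static void schedule_teardown(void) { enqueue(final_free); }

int main(void)
{
	enqueue(pending_switch);	/* was queued before teardown began */
	schedule_teardown();
	drain();
	return 0;
}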
static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
struct fixed_file_data *data = ctx->file_data;
@@ -5336,14 +5363,13 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
flush_work(&data->ref_work);
wait_for_completion(&data->done);
io_ring_file_ref_flush(data);
- percpu_ref_exit(&data->refs);
__io_sqe_files_unregister(ctx);
nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
for (i = 0; i < nr_tables; i++)
kfree(data->table[i].files);
kfree(data->table);
- kfree(data);
+ call_rcu(&data->rcu, io_file_ref_exit_and_free);
ctx->file_data = NULL;
ctx->nr_user_files = 0;
return 0;
@@ -5595,7 +5621,6 @@ static void io_ring_file_ref_switch(struct work_struct *work)
data = container_of(work, struct fixed_file_data, ref_work);
io_ring_file_ref_flush(data);
- percpu_ref_get(&data->refs);
percpu_ref_switch_to_percpu(&data->refs);
}
@@ -5771,8 +5796,13 @@ static void io_atomic_switch(struct percpu_ref *ref)
{
struct fixed_file_data *data;
+ /*
+ * Juggle reference to ensure we hit zero, if needed, so we can
+ * switch back to percpu mode
+ */
data = container_of(ref, struct fixed_file_data, refs);
- clear_bit(FFD_F_ATOMIC, &data->state);
+ percpu_ref_put(&data->refs);
+ percpu_ref_get(&data->refs);
}
static bool io_queue_file_removal(struct fixed_file_data *data,
@@ -5795,11 +5825,7 @@ static bool io_queue_file_removal(struct fixed_file_data *data,
llist_add(&pfile->llist, &data->put_llist);
if (pfile == &pfile_stack) {
- if (!test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
- percpu_ref_put(&data->refs);
- percpu_ref_switch_to_atomic(&data->refs,
- io_atomic_switch);
- }
+ percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
wait_for_completion(&done);
flush_work(&data->ref_work);
return false;
@@ -5873,10 +5899,8 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
up->offset++;
}
- if (ref_switch && !test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
- percpu_ref_put(&data->refs);
+ if (ref_switch)
percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
- }
return done ? done : err;
}
@@ -6334,6 +6358,7 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_sqe_buffer_unregister(ctx);
io_sqe_files_unregister(ctx);
io_eventfd_unregister(ctx);
+ idr_destroy(&ctx->personality_idr);
#if defined(CONFIG_UNIX)
if (ctx->ring_sock) {
@@ -6647,6 +6672,7 @@ out_fput:
return submitted ? submitted : ret;
}
+#ifdef CONFIG_PROC_FS
static int io_uring_show_cred(int id, void *p, void *data)
{
const struct cred *cred = p;
@@ -6720,6 +6746,7 @@ static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
percpu_ref_put(&ctx->refs);
}
}
+#endif
static const struct file_operations io_uring_fops = {
.release = io_uring_release,
@@ -6731,7 +6758,9 @@ static const struct file_operations io_uring_fops = {
#endif
.poll = io_uring_poll,
.fasync = io_uring_fasync,
+#ifdef CONFIG_PROC_FS
.show_fdinfo = io_uring_show_fdinfo,
+#endif
};
static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index d181948c0390..3dccc23cf010 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1150,8 +1150,8 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
/* For undo access buffer must have data copied */
if (undo && !jh->b_committed_data)
goto out;
- if (jh->b_transaction != handle->h_transaction &&
- jh->b_next_transaction != handle->h_transaction)
+ if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
+ READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
goto out;
/*
* There are two reasons for the barrier here:
@@ -2569,8 +2569,8 @@ bool __jbd2_journal_refile_buffer(struct journal_head *jh)
* our jh reference and thus __jbd2_journal_file_buffer() must not
* take a new one.
*/
- jh->b_transaction = jh->b_next_transaction;
- jh->b_next_transaction = NULL;
+ WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
+ WRITE_ONCE(jh->b_next_transaction, NULL);
if (buffer_freed(bh))
jlist = BJ_Forget;
else if (jh->b_modified)
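READ_ONCE/WRITE_ONCE stop the compiler from tearing, fusing, or re-reading the
accesses, which is what makes the lockless check above sound against the
locked update below it. The closest portable analogue is a relaxed C11 atomic;
a sketch of the same read/write pairing (hypothetical types):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct head {
	_Atomic(void *) txn;		/* read locklessly, written under lock */
	_Atomic(void *) next_txn;
};

static bool access_granted(struct head *h, void *handle_txn)
{
	/* lockless fast path: a torn read is impossible here */
	return atomic_load_explicit(&h->txn, memory_order_relaxed) == handle_txn ||
	       atomic_load_explicit(&h->next_txn, memory_order_relaxed) == handle_txn;
}

static void refile(struct head *h)	/* caller holds the list lock */
{
	void *nxt = atomic_load_explicit(&h->next_txn, memory_order_relaxed);

	atomic_store_explicit(&h->txn, nxt, memory_order_relaxed);
	atomic_store_explicit(&h->next_txn, NULL, memory_order_relaxed);
}

int main(void)
{
	struct head h = { 0 };
	int dummy;

	atomic_store_explicit(&h.next_txn, &dummy, memory_order_relaxed);
	refile(&h);
	return !access_granted(&h, &dummy);	/* txn is now &dummy */
}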
diff --git a/fs/locks.c b/fs/locks.c
index 44b6da032842..426b55d333d5 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -753,20 +753,6 @@ int locks_delete_block(struct file_lock *waiter)
{
int status = -ENOENT;
- /*
- * If fl_blocker is NULL, it won't be set again as this thread
- * "owns" the lock and is the only one that might try to claim
- * the lock. So it is safe to test fl_blocker locklessly.
- * Also if fl_blocker is NULL, this waiter is not listed on
- * fl_blocked_requests for some lock, so no other request can
- * be added to the list of fl_blocked_requests for this
- * request. So if fl_blocker is NULL, it is safe to
- * locklessly check if fl_blocked_requests is empty. If both
- * of these checks succeed, there is no need to take the lock.
- */
- if (waiter->fl_blocker == NULL &&
- list_empty(&waiter->fl_blocked_requests))
- return status;
spin_lock(&blocked_lock_lock);
if (waiter->fl_blocker)
status = 0;
diff --git a/fs/open.c b/fs/open.c
index 0788b3715731..b69d6eed67e6 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -860,9 +860,6 @@ cleanup_file:
* the return value of d_splice_alias(), then the caller needs to perform dput()
* on it after finish_open().
*
- * On successful return @file is a fully instantiated open file. After this, if
- * an error occurs in ->atomic_open(), it needs to clean up with fput().
- *
* Returns zero on success or -errno if the open failed.
*/
int finish_open(struct file *file, struct dentry *dentry,
diff --git a/fs/zonefs/Kconfig b/fs/zonefs/Kconfig
index fb87ad372e29..ef2697b78820 100644
--- a/fs/zonefs/Kconfig
+++ b/fs/zonefs/Kconfig
@@ -2,6 +2,7 @@ config ZONEFS_FS
tristate "zonefs filesystem support"
depends on BLOCK
depends on BLK_DEV_ZONED
+ select FS_IOMAP
help
zonefs is a simple file system which exposes zones of a zoned block
device (e.g. host-managed or host-aware SMR disk drives) as files.
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 8bc6ef82d693..69aee3dfb660 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -601,13 +601,13 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret;
/*
- * For async direct IOs to sequential zone files, ignore IOCB_NOWAIT
+ * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
* as this can cause write reordering (e.g. the first aio gets EAGAIN
* on the inode lock but the second goes through and is now unaligned).
*/
- if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !is_sync_kiocb(iocb)
- && (iocb->ki_flags & IOCB_NOWAIT))
- iocb->ki_flags &= ~IOCB_NOWAIT;
+ if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !is_sync_kiocb(iocb) &&
+ (iocb->ki_flags & IOCB_NOWAIT))
+ return -EOPNOTSUPP;
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock(inode))
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index a2583c2bc054..4defed58ea33 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -532,11 +532,12 @@ typedef u64 acpi_integer;
strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE)
/*
- * Algorithm to obtain access bit width.
+ * Algorithm to obtain access bit or byte width.
* Can be used with access_width of struct acpi_generic_address and access_size of
* struct acpi_resource_generic_register.
*/
#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2))
+#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
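A worked example of the two formulas: a GAS access_width encoding of 2
("word") yields 1 << (2 + 2) = 16 bits and 1 << (2 - 1) = 2 bytes, and the two
stay consistent (bits = 8 * bytes) for every encoding. A quick check:

#include <assert.h>

#define ACCESS_BIT_WIDTH(size)  (1 << ((size) + 2))
#define ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))

int main(void)
{
	assert(ACCESS_BIT_WIDTH(1) == 8  && ACCESS_BYTE_WIDTH(1) == 1);
	assert(ACCESS_BIT_WIDTH(2) == 16 && ACCESS_BYTE_WIDTH(2) == 2);
	assert(ACCESS_BIT_WIDTH(4) == 64 && ACCESS_BYTE_WIDTH(4) == 8);
	return 0;
}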
/*******************************************************************************
*
diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h
index 4e6dc840b159..9ecb3c1f0f15 100644
--- a/include/crypto/curve25519.h
+++ b/include/crypto/curve25519.h
@@ -33,7 +33,8 @@ bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
const u8 basepoint[CURVE25519_KEY_SIZE])
{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
+ (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
curve25519_arch(mypublic, secret, basepoint);
else
curve25519_generic(mypublic, secret, basepoint);
@@ -49,7 +50,8 @@ __must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
CURVE25519_KEY_SIZE)))
return false;
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
+ (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
curve25519_base_arch(pub, secret);
else
curve25519_generic(pub, secret, curve25519_base_point);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index bcb39da9adb4..41725d88d27e 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -81,7 +81,7 @@ struct drm_dp_vcpi {
* &drm_dp_mst_topology_mgr.base.lock.
* @num_sdp_stream_sinks: Number of stream sinks. Protected by
* &drm_dp_mst_topology_mgr.base.lock.
- * @available_pbn: Available bandwidth for this port. Protected by
+ * @full_pbn: Max possible bandwidth for this port. Protected by
* &drm_dp_mst_topology_mgr.base.lock.
* @next: link to next port on this branch device
* @aux: i2c aux transport to talk to device connected to this port, protected
@@ -126,7 +126,7 @@ struct drm_dp_mst_port {
u8 dpcd_rev;
u8 num_sdp_streams;
u8 num_sdp_stream_sinks;
- uint16_t available_pbn;
+ uint16_t full_pbn;
struct list_head next;
/**
* @mstb: the branch device connected to this port, if there is one.
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index e34a7b7f848a..294b2931c4cc 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -96,6 +96,11 @@ struct drm_gem_shmem_object {
* The address are un-mapped when the count reaches zero.
*/
unsigned int vmap_use_count;
+
+ /**
+ * @map_cached: map object cached (instead of using writecombine).
+ */
+ bool map_cached;
};
#define to_drm_gem_shmem_obj(obj) \
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 4bbb5f1c8b5b..48ea093ff04c 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -56,6 +56,19 @@
})
/**
+ * FIELD_MAX() - produce the maximum value representable by a field
+ * @_mask: shifted mask defining the field's length and position
+ *
+ * FIELD_MAX() returns the maximum value that can be held in the field
+ * specified by @_mask.
+ */
+#define FIELD_MAX(_mask) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \
+ (typeof(_mask))((_mask) >> __bf_shf(_mask)); \
+ })
+
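Usage sketch: for a field occupying bits 7..4 the shifted mask is 0xf0, and
FIELD_MAX() collapses it to the largest storable value, 0xf. A stripped-down
model (the real macro adds compile-time checks; __builtin_ctz is a GCC/Clang
builtin standing in for __bf_shf):

#include <assert.h>

#define BF_SHIFT(m)   (__builtin_ctz(m))	/* position of lowest set bit */
#define FIELD_MAX_(m) ((m) >> BF_SHIFT(m))	/* sans the type/check machinery */

int main(void)
{
	assert(FIELD_MAX_(0xf0u) == 0xfu);
	assert(FIELD_MAX_(0x0700u) == 0x7u);
	return 0;
}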
+/**
* FIELD_FIT() - check if value fits in the field
* @_mask: shifted mask defining the field's length and position
* @_val: value to test against the field
@@ -110,6 +123,7 @@ static __always_inline u64 field_mask(u64 field)
{
return field / field_multiplier(field);
}
+#define field_max(field) ((typeof(field))field_mask(field))
#define ____MAKE_OP(type,base,to,from) \
static __always_inline __##type type##_encode_bits(base v, base field) \
{ \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 053ea4b51988..f629d40c645c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -524,7 +524,7 @@ struct request_queue {
unsigned int sg_reserved_size;
int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
- struct blk_trace *blk_trace;
+ struct blk_trace __rcu *blk_trace;
struct mutex blk_trace_mutex;
#endif
/*
@@ -1494,7 +1494,6 @@ static inline void put_dev_sector(Sector p)
}
int kblockd_schedule_work(struct work_struct *work);
-int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7bb2d8de9f30..3b6ff5902edc 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
**/
#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
do { \
- struct blk_trace *bt = (q)->blk_trace; \
+ struct blk_trace *bt; \
+ \
+ rcu_read_lock(); \
+ bt = rcu_dereference((q)->blk_trace); \
if (unlikely(bt)) \
__trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
+ rcu_read_unlock(); \
} while (0)
#define blk_add_trace_msg(q, fmt, ...) \
blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
@@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
static inline bool blk_trace_note_message_enabled(struct request_queue *q)
{
- struct blk_trace *bt = q->blk_trace;
- if (likely(!bt))
- return false;
- return bt->act_mask & BLK_TC_NOTIFY;
+ struct blk_trace *bt;
+ bool ret;
+
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
+ rcu_read_unlock();
+ return ret;
}
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
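The conversion above replaces a plain pointer read with the RCU reader
protocol: bracket the dereference, and let the writer wait a grace period
before freeing. A toy single-file model of that shape (a spinning reader
counter stands in for the grace period; real RCU makes the read side free):

#include <stdatomic.h>
#include <stdlib.h>

struct trace { unsigned act_mask; };

static _Atomic(struct trace *) live_trace;
static atomic_int readers;

static int notify_enabled(void)
{
	struct trace *t;
	int ret;

	atomic_fetch_add(&readers, 1);		/* ~rcu_read_lock() */
	t = atomic_load(&live_trace);		/* ~rcu_dereference() */
	ret = t && (t->act_mask & 0x1);
	atomic_fetch_sub(&readers, 1);		/* ~rcu_read_unlock() */
	return ret;
}

static void retire_trace(void)
{
	struct trace *old = atomic_exchange(&live_trace, NULL);

	while (atomic_load(&readers))		/* ~synchronize_rcu() */
		;				/* spin until quiescent */
	free(old);
}

int main(void)
{
	struct trace *t = malloc(sizeof(*t));

	if (!t)
		return 1;
	t->act_mask = 0x1;
	atomic_store(&live_trace, t);
	notify_enabled();	/* reads under the "read lock" */
	retire_trace();		/* waits for readers, then frees */
	return 0;
}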
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index a11d5b7dbbf3..a7cd5c7a2509 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -36,7 +36,7 @@ struct bpf_cgroup_storage_map;
struct bpf_storage_buffer {
struct rcu_head rcu;
- char data[0];
+ char data[];
};
struct bpf_cgroup_storage {
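This hunk (and the matching ones in dccp.h and netdevice.h below) swaps the
GNU [0] extension for the standard C99 flexible array member. The allocation
idiom is unchanged: size the block as header plus payload. A minimal sketch:

#include <stdlib.h>
#include <string.h>

struct buffer {
	size_t len;
	char data[];	/* must be the last member */
};

static struct buffer *buffer_new(const char *src, size_t len)
{
	struct buffer *b = malloc(sizeof(*b) + len);	/* header + payload */

	if (!b)
		return NULL;
	b->len = len;
	memcpy(b->data, src, len);
	return b;
}

int main(void)
{
	struct buffer *b = buffer_new("abc", 3);

	if (!b)
		return 1;
	free(b);	/* one allocation covers header and payload */
	return 0;
}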
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 49b1a70e12c8..6015a4daf118 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -859,7 +859,7 @@ struct bpf_prog_array_item {
struct bpf_prog_array {
struct rcu_head rcu;
- struct bpf_prog_array_item items[0];
+ struct bpf_prog_array_item items[];
};
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
@@ -885,7 +885,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *_prog; \
struct bpf_prog_array *_array; \
u32 _ret = 1; \
- preempt_disable(); \
+ migrate_disable(); \
rcu_read_lock(); \
_array = rcu_dereference(array); \
if (unlikely(check_non_null && !_array))\
@@ -898,7 +898,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
} \
_out: \
rcu_read_unlock(); \
- preempt_enable(); \
+ migrate_enable(); \
_ret; \
})
@@ -932,7 +932,7 @@ _out: \
u32 ret; \
u32 _ret = 1; \
u32 _cn = 0; \
- preempt_disable(); \
+ migrate_disable(); \
rcu_read_lock(); \
_array = rcu_dereference(array); \
_item = &_array->items[0]; \
@@ -944,7 +944,7 @@ _out: \
_item++; \
} \
rcu_read_unlock(); \
- preempt_enable(); \
+ migrate_enable(); \
if (_ret) \
_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
else \
@@ -961,6 +961,36 @@ _out: \
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
+/*
+ * Block execution of BPF programs attached to instrumentation (perf,
+ * kprobes, tracepoints) to prevent deadlocks on map operations as any of
+ * these events can happen inside a region which holds a map bucket lock
+ * and can deadlock on it.
+ *
+ * Use the preemption safe inc/dec variants on RT because migrate disable
+ * is preemptible on RT and preemption in the middle of the RMW operation
+ * might lead to inconsistent state. Use the raw variants for non RT
+ * kernels as migrate_disable() maps to preempt_disable() so the slightly
+ * more expensive save operation can be avoided.
+ */
+static inline void bpf_disable_instrumentation(void)
+{
+ migrate_disable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ this_cpu_inc(bpf_prog_active);
+ else
+ __this_cpu_inc(bpf_prog_active);
+}
+
+static inline void bpf_enable_instrumentation(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ this_cpu_dec(bpf_prog_active);
+ else
+ __this_cpu_dec(bpf_prog_active);
+ migrate_enable();
+}
+
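The helpers above implement a re-entrancy guard: map code bumps a per-CPU
counter, and any tracing BPF program that fires on that CPU while the counter
is nonzero bails out instead of re-entering the map code and deadlocking on a
bucket lock. A single-threaded toy of that guard (plain int standing in for
the per-CPU counter):

#include <stdio.h>

static int prog_active;	/* stand-in for the per-cpu bpf_prog_active */

static void disable_instr(void) { prog_active++; }
static void enable_instr(void)  { prog_active--; }

static void tracing_hook(void)
{
	if (prog_active) {	/* re-entrancy guard */
		puts("skipped: map code already running on this CPU");
		return;
	}
	puts("ran BPF program");
}

int main(void)
{
	disable_instr();
	tracing_hook();		/* fires "inside" a map operation: skipped */
	enable_instr();
	tracing_hook();		/* fires outside: runs */
	return 0;
}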
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
@@ -993,6 +1023,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index d7ddebd0cdec..e75d2191226b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -62,6 +62,7 @@ struct css_task_iter {
struct list_head *mg_tasks_head;
struct list_head *dying_tasks_head;
+ struct list_head *cur_tasks_head;
struct css_set *cur_cset;
struct css_set *cur_dcset;
struct task_struct *cur_task;
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 6b64b6cc2175..07e547c02fd8 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -198,7 +198,7 @@ enum dccp_role {
struct dccp_service_list {
__u32 dccpsl_nr;
- __be32 dccpsl_list[0];
+ __be32 dccpsl_list[];
};
#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 3d013de64f70..43efcc49f061 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -127,9 +127,9 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
-struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
- struct dentry *parent,
- struct debugfs_regset32 *regset);
+void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset);
void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix);
@@ -304,11 +304,10 @@ static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_regset32(const char *name,
- umode_t mode, struct dentry *parent,
- struct debugfs_regset32 *regset)
+static inline void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset)
{
- return ERR_PTR(-ENODEV);
}
static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
diff --git a/include/linux/device.h b/include/linux/device.h
index 3e40533d2037..1311f276f533 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -798,6 +798,17 @@ static inline struct device_node *dev_of_node(struct device *dev)
return dev->of_node;
}
+static inline bool dev_has_sync_state(struct device *dev)
+{
+ if (!dev)
+ return false;
+ if (dev->driver && dev->driver->sync_state)
+ return true;
+ if (dev->bus && dev->bus->sync_state)
+ return true;
+ return false;
+}
+
/*
* High level routines for use by the bus drivers
*/
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 95991e4300bf..be355f37337d 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -177,8 +177,57 @@ void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
const unsigned long *src);
+#define ETHTOOL_COALESCE_RX_USECS BIT(0)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES BIT(1)
+#define ETHTOOL_COALESCE_RX_USECS_IRQ BIT(2)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ BIT(3)
+#define ETHTOOL_COALESCE_TX_USECS BIT(4)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES BIT(5)
+#define ETHTOOL_COALESCE_TX_USECS_IRQ BIT(6)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ BIT(7)
+#define ETHTOOL_COALESCE_STATS_BLOCK_USECS BIT(8)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE_RX BIT(9)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE_TX BIT(10)
+#define ETHTOOL_COALESCE_PKT_RATE_LOW BIT(11)
+#define ETHTOOL_COALESCE_RX_USECS_LOW BIT(12)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW BIT(13)
+#define ETHTOOL_COALESCE_TX_USECS_LOW BIT(14)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW BIT(15)
+#define ETHTOOL_COALESCE_PKT_RATE_HIGH BIT(16)
+#define ETHTOOL_COALESCE_RX_USECS_HIGH BIT(17)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH BIT(18)
+#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20)
+#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
+
+#define ETHTOOL_COALESCE_USECS \
+ (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
+#define ETHTOOL_COALESCE_MAX_FRAMES \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES | ETHTOOL_COALESCE_TX_MAX_FRAMES)
+#define ETHTOOL_COALESCE_USECS_IRQ \
+ (ETHTOOL_COALESCE_RX_USECS_IRQ | ETHTOOL_COALESCE_TX_USECS_IRQ)
+#define ETHTOOL_COALESCE_MAX_FRAMES_IRQ \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE \
+ (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | ETHTOOL_COALESCE_USE_ADAPTIVE_TX)
+#define ETHTOOL_COALESCE_USECS_LOW_HIGH \
+ (ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_TX_USECS_LOW | \
+ ETHTOOL_COALESCE_RX_USECS_HIGH | ETHTOOL_COALESCE_TX_USECS_HIGH)
+#define ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH)
+#define ETHTOOL_COALESCE_PKT_RATE_RX_USECS \
+ (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | \
+ ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \
+ ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
+ ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
+
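The check these capability bits enable in the ethtool core amounts to mask
arithmetic: if userspace requests a coalescing parameter whose bit the driver
did not set in supported_coalesce_params, the request is rejected before the
driver sees it. A generic sketch of that validation (illustrative values):

#include <assert.h>
#include <stdint.h>

static int validate_coalesce(uint32_t requested, uint32_t supported)
{
	return (requested & ~supported) ? -1 /* -EOPNOTSUPP */ : 0;
}

int main(void)
{
	uint32_t supported = (1u << 0) | (1u << 4);	/* RX_USECS | TX_USECS */

	assert(validate_coalesce(1u << 0, supported) == 0);
	assert(validate_coalesce(1u << 9, supported) == -1);	/* adaptive RX */
	return 0;
}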
/**
* struct ethtool_ops - optional netdev operations
+ * @supported_coalesce_params: supported types of interrupt coalescing.
* @get_drvinfo: Report driver/device information. Should only set the
* @driver, @version, @fw_version and @bus_info fields. If not
* implemented, the @driver and @bus_info fields will be filled in
@@ -207,8 +256,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* or zero.
* @get_coalesce: Get interrupt coalescing parameters. Returns a negative
* error code or zero.
- * @set_coalesce: Set interrupt coalescing parameters. Returns a negative
- * error code or zero.
+ * @set_coalesce: Set interrupt coalescing parameters. Supported coalescing
+ * types should be set in @supported_coalesce_params.
+ * Returns a negative error code or zero.
* @get_ringparam: Report ring sizes
* @set_ringparam: Set ring sizes. Returns a negative error code or zero.
* @get_pauseparam: Report pause parameters
@@ -292,7 +342,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @set_per_queue_coalesce: Set interrupt coalescing parameters per queue.
* It must check that the given queue number is valid. If neither a RX nor
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
- * queue has this number, ignore the inapplicable fields.
+ * queue has this number, ignore the inapplicable fields. Supported
+ * coalescing types should be set in @supported_coalesce_params.
* Returns a negative error code or zero.
* @get_link_ksettings: Get various device settings including Ethernet link
* settings. The %cmd and %link_mode_masks_nwords fields should be
@@ -323,6 +374,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* of the generic netdev features interface.
*/
struct ethtool_ops {
+ u32 supported_coalesce_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
@@ -420,4 +472,10 @@ struct ethtool_rx_flow_rule *
ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input);
void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule);
+bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd);
+int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd,
+ u32 *dev_speed, u8 *dev_duplex);
+
#endif /* _LINUX_ETHTOOL_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index f349e2c0884c..43b5e455d2f5 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -561,7 +561,7 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
u32 ret; \
- cant_sleep(); \
+ cant_migrate(); \
if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
struct bpf_prog_stats *stats; \
u64 start = sched_clock(); \
@@ -576,8 +576,30 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
} \
ret; })
-#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx, \
- bpf_dispatcher_nopfunc)
+#define BPF_PROG_RUN(prog, ctx) \
+ __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nopfunc)
+
+/*
+ * Use in preemptible and therefore migratable context to make sure that
+ * the execution of the BPF program runs on one CPU.
+ *
+ * This uses migrate_disable/enable() explicitly to document that the
+ * invocation of a BPF program does not require reentrancy protection
+ * against a BPF program which is invoked from a preempting task.
+ *
+ * For non RT enabled kernels migrate_disable/enable() maps to
+ * preempt_disable/enable(), i.e. it disables also preemption.
+ */
+static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
+ const void *ctx)
+{
+ u32 ret;
+
+ migrate_disable();
+ ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nopfunc);
+ migrate_enable();
+ return ret;
+}
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
@@ -655,6 +677,7 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
return qdisc_skb_cb(skb)->data;
}
+/* Must be invoked with migration disabled */
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
@@ -680,9 +703,9 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
{
u32 res;
- preempt_disable();
+ migrate_disable();
res = __bpf_prog_run_save_cb(prog, skb);
- preempt_enable();
+ migrate_enable();
return res;
}
@@ -695,9 +718,7 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
if (unlikely(prog->cb_access))
memset(cb_data, 0, BPF_SKB_CB_LEN);
- preempt_disable();
- res = BPF_PROG_RUN(prog, skb);
- preempt_enable();
+ res = bpf_prog_run_pin_on_cpu(prog, skb);
return res;
}
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 39faaaf843e1..ce9ed1c0602f 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -2,24 +2,17 @@
#ifndef _INET_DIAG_H_
#define _INET_DIAG_H_ 1
+#include <net/netlink.h>
#include <uapi/linux/inet_diag.h>
-struct net;
-struct sock;
struct inet_hashinfo;
-struct nlattr;
-struct nlmsghdr;
-struct sk_buff;
-struct netlink_callback;
struct inet_diag_handler {
void (*dump)(struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ const struct inet_diag_req_v2 *r);
- int (*dump_one)(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+ int (*dump_one)(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req);
void (*idiag_get_info)(struct sock *sk,
@@ -40,18 +33,25 @@ struct inet_diag_handler {
__u16 idiag_info_size;
};
+struct bpf_sk_storage_diag;
+struct inet_diag_dump_data {
+ struct nlattr *req_nlas[__INET_DIAG_REQ_MAX];
+#define inet_diag_nla_bc req_nlas[INET_DIAG_REQ_BYTECODE]
+#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES]
+
+ struct bpf_sk_storage_diag *bpf_stg_diag;
+};
+
struct inet_connection_sock;
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
- struct sk_buff *skb, const struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh, bool net_admin);
+ struct sk_buff *skb, struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *req,
+ u16 nlmsg_flags, bool net_admin);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ const struct inet_diag_req_v2 *r);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ struct netlink_callback *cb,
const struct inet_diag_req_v2 *req);
struct sock *inet_diag_find_one_icsk(struct net *net,
@@ -62,6 +62,17 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
+static inline size_t inet_diag_msg_attrs_size(void)
+{
+ return nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+ + nla_total_size(1) /* INET_DIAG_TOS */
+#if IS_ENABLED(CONFIG_IPV6)
+ + nla_total_size(1) /* INET_DIAG_TCLASS */
+ + nla_total_size(1) /* INET_DIAG_SKV6ONLY */
+#endif
+ + nla_total_size(4) /* INET_DIAG_MARK */
+ + nla_total_size(4); /* INET_DIAG_CLASS_ID */
+}
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
struct user_namespace *user_ns, bool net_admin);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 0d9db2a14f44..9b7a8d74a9d6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -257,6 +257,13 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
+#ifndef CONFIG_PREEMPT_RT
+# define cant_migrate() cant_sleep()
+#else
+ /* Placeholder for now */
+# define cant_migrate() do { } while (0)
+#endif
+
/**
* abs - return absolute value of an argument
* @x: the value. If it is unsigned type, it is converted to signed type first.
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7944ad6ac10b..bcb9b2ac0791 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1344,7 +1344,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
struct kvm_vcpu *kvm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
diff --git a/include/linux/mdio-xpcs.h b/include/linux/mdio-xpcs.h
new file mode 100644
index 000000000000..9a841aa5982d
--- /dev/null
+++ b/include/linux/mdio-xpcs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XPCS helpers
+ */
+
+#ifndef __LINUX_MDIO_XPCS_H
+#define __LINUX_MDIO_XPCS_H
+
+#include <linux/phy.h>
+#include <linux/phylink.h>
+
+struct mdio_xpcs_args {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ struct mii_bus *bus;
+ int addr;
+};
+
+struct mdio_xpcs_ops {
+ int (*validate)(struct mdio_xpcs_args *xpcs,
+ unsigned long *supported,
+ struct phylink_link_state *state);
+ int (*config)(struct mdio_xpcs_args *xpcs,
+ const struct phylink_link_state *state);
+ int (*get_state)(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state);
+ int (*link_up)(struct mdio_xpcs_args *xpcs, int speed,
+ phy_interface_t interface);
+ int (*probe)(struct mdio_xpcs_args *xpcs, phy_interface_t interface);
+};
+
+#if IS_ENABLED(CONFIG_MDIO_XPCS)
+struct mdio_xpcs_ops *mdio_xpcs_get_ops(void);
+#else
+static inline struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __LINUX_MDIO_XPCS_H */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index f99cbe249425..44bf95d63677 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -518,9 +518,11 @@ struct mlx5_rate_limit {
};
struct mlx5_rl_entry {
- struct mlx5_rate_limit rl;
- u16 index;
- u16 refcount;
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
+ u16 index;
+ u64 refcount;
+ u16 uid;
+ u8 dedicated : 1;
};
struct mlx5_rl_table {
@@ -1008,6 +1010,9 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+ bool dedicated_entry, u16 *index);
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 98e667b176ef..c16827eeba9c 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -70,8 +70,30 @@ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
+bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
-u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
+
+/* Reg C0 usage:
+ * Reg C0 = < ESW_VHCA_ID_BITS(8) | ESW_VPORT BITS(8) | ESW_CHAIN_TAG(16) >
+ *
+ * The highest 8 bits of reg c0 are the vhca_id, the next 8 bits are the
+ * vport_num, and the rest (lowest 16 bits) is left for tc chain tag
+ * restoration.
+ * VHCA_ID + VPORT comprise the SOURCE_PORT matching.
+ */
+#define ESW_VHCA_ID_BITS 8
+#define ESW_VPORT_BITS 8
+#define ESW_SOURCE_PORT_METADATA_BITS (ESW_VHCA_ID_BITS + ESW_VPORT_BITS)
+#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS)
+#define ESW_CHAIN_TAG_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
+#define ESW_CHAIN_TAG_METADATA_MASK GENMASK(ESW_CHAIN_TAG_METADATA_BITS - 1,\
+ 0)
+
+static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
+{
+ return GENMASK(31, 32 - ESW_SOURCE_PORT_METADATA_BITS);
+}
+
+u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num);
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
#else /* CONFIG_MLX5_ESWITCH */
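A worked example of the Reg C0 split documented above: 8 vhca-id bits plus
8 vport bits occupy the top 16 bits, so the source-port match mask is
GENMASK(31, 16) = 0xffff0000 and the chain tag keeps the bottom 16 bits. In
plain C:

#include <assert.h>
#include <stdint.h>

#define SRC_PORT_BITS  16
#define CHAIN_TAG_MASK ((1u << (32 - SRC_PORT_BITS)) - 1)	/* 0x0000ffff */

static uint32_t vport_metadata_mask(void)
{
	return ~CHAIN_TAG_MASK;					/* 0xffff0000 */
}

int main(void)
{
	assert(vport_metadata_mask() == 0xffff0000u);
	assert((0x12345678u & CHAIN_TAG_MASK) == 0x5678u);	/* chain tag */
	return 0;
}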
@@ -88,17 +110,29 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
}
static inline bool
+mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
+{
+ return false;
+};
+
+static inline bool
mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
return false;
};
static inline u32
-mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
+mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
int vport_num)
{
return 0;
};
+
+static inline u32
+mlx5_eswitch_get_vport_metadata_mask(void)
+{
+ return 0;
+}
#endif /* CONFIG_MLX5_ESWITCH */
#endif
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 4cae16016b2b..a5cf5c76f348 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -84,6 +84,7 @@ enum {
FDB_TC_OFFLOAD,
FDB_FT_OFFLOAD,
FDB_SLOW_PATH,
+ FDB_PER_VPORT,
};
struct mlx5_pkt_reformat;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index bfdf41537cf1..2bd920965bd3 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -414,7 +414,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_16[0x1];
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
- u8 reserved_at_19[0x7];
+ u8 reformat_and_fwd_to_table[0x1];
+ u8 reserved_at_1a[0x6];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
@@ -741,7 +742,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 flow_source[0x1];
u8 reserved_at_18[0x2];
u8 multi_fdb_encap[0x1];
- u8 reserved_at_1b[0x1];
+ u8 egress_acl_forward_to_vport[0x1];
u8 fdb_multi_path_to_table[0x1];
u8 reserved_at_1d[0x3];
@@ -813,7 +814,9 @@ struct mlx5_ifc_qos_cap_bits {
u8 reserved_at_4[0x1];
u8 packet_pacing_burst_bound[0x1];
u8 packet_pacing_typical_size[0x1];
- u8 reserved_at_7[0x19];
+ u8 reserved_at_7[0x4];
+ u8 packet_pacing_uid[0x1];
+ u8 reserved_at_c[0x14];
u8 reserved_at_20[0x20];
@@ -8265,9 +8268,20 @@ struct mlx5_ifc_set_pp_rate_limit_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_set_pp_rate_limit_context_bits {
+ u8 rate_limit[0x20];
+
+ u8 burst_upper_bound[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 typical_packet_size[0x10];
+
+ u8 reserved_at_60[0x120];
+};
+
struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -8277,14 +8291,7 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 reserved_at_60[0x20];
- u8 rate_limit[0x20];
-
- u8 burst_upper_bound[0x20];
-
- u8 reserved_at_c0[0x10];
- u8 typical_packet_size[0x10];
-
- u8 reserved_at_e0[0x120];
+ struct mlx5_ifc_set_pp_rate_limit_context_bits ctx;
};
struct mlx5_ifc_access_register_out_bits {
@@ -8420,7 +8427,8 @@ struct mlx5_ifc_ptys_reg_bits {
u8 proto_mask[0x3];
u8 an_status[0x4];
- u8 reserved_at_24[0x1c];
+ u8 reserved_at_24[0xc];
+ u8 data_rate_oper[0x10];
u8 ext_eth_proto_capability[0x20];
@@ -10486,7 +10494,8 @@ enum {
};
enum {
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
};
struct mlx5_ifc_tls_static_params_bits {
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 37e065a80a43..07d77323f78a 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -608,7 +608,7 @@ struct mlx5_ifc_tls_cmd_bits {
struct mlx5_ifc_tls_resp_bits {
u8 syndrome[0x20];
u8 stream_id[0x20];
- u8 reserverd[0x40];
+ u8 reserved[0x40];
};
#define MLX5_TLS_COMMAND_SIZE (0x100)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 52269e56c514..c54fb96cb1e6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2715,6 +2715,10 @@ static inline bool debug_pagealloc_enabled_static(void)
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+/*
+ * When called in DEBUG_PAGEALLOC context, the call should most likely be
+ * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
+ */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6c3f7032e8d9..654808bfad83 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -664,7 +664,7 @@ static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node
struct rps_map {
unsigned int len;
struct rcu_head rcu;
- u16 cpus[0];
+ u16 cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
@@ -686,7 +686,7 @@ struct rps_dev_flow {
struct rps_dev_flow_table {
unsigned int mask;
struct rcu_head rcu;
- struct rps_dev_flow flows[0];
+ struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
((_num) * sizeof(struct rps_dev_flow)))
@@ -704,7 +704,7 @@ struct rps_dev_flow_table {
struct rps_sock_flow_table {
u32 mask;
- u32 ents[0] ____cacheline_aligned_in_smp;
+ u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
@@ -767,7 +767,7 @@ struct xps_map {
unsigned int len;
unsigned int alloc_len;
struct rcu_head rcu;
- u16 queues[0];
+ u16 queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
@@ -778,7 +778,7 @@ struct xps_map {
*/
struct xps_dev_maps {
struct rcu_head rcu;
- struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
+ struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};
#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
@@ -853,6 +853,7 @@ enum tc_setup_type {
TC_SETUP_FT,
TC_SETUP_QDISC_ETS,
TC_SETUP_QDISC_TBF,
+ TC_SETUP_QDISC_FIFO,
};
/* These structures hold the attributes of bpf state that are being passed
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 205fa7b1f07a..788969ccbbde 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -188,10 +188,10 @@ struct netlink_callback {
struct module *module;
struct netlink_ext_ack *extack;
u16 family;
- u16 min_dump_alloc;
- bool strict_check;
u16 answer_flags;
+ u32 min_dump_alloc;
unsigned int prev_seq, seq;
+ bool strict_check;
union {
u8 ctx[48];
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 3840a541a9de..fc54b8922e66 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -42,6 +42,13 @@
#include <linux/pci_ids.h>
+#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
+ PCI_STATUS_SIG_SYSTEM_ERROR | \
+ PCI_STATUS_REC_MASTER_ABORT | \
+ PCI_STATUS_REC_TARGET_ABORT | \
+ PCI_STATUS_SIG_TARGET_ABORT | \
+ PCI_STATUS_PARITY)
+
/*
* The PCI interface treats multi-function devices as independent
* devices. The slot/function address of each device is encoded
@@ -1045,6 +1052,8 @@ int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
+u64 pci_get_dsn(struct pci_dev *dev);
+
struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
@@ -1203,6 +1212,7 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
+int pci_status_get_and_clear_errors(struct pci_dev *pdev);
int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
@@ -1699,6 +1709,9 @@ static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
{ return 0; }
+static inline u64 pci_get_dsn(struct pci_dev *dev)
+{ return 0; }
+
/* Power management related routines */
static inline int pci_save_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_state(struct pci_dev *dev) { }
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e72dbd0d2d6a..7a08023bdbc5 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -358,6 +358,7 @@ struct macsec_ops;
* is_gigabit_capable: Set to true if PHY supports 1000Mbps
* has_fixups: Set to true if this phy has fixups/quirks.
* suspended: Set to true if this phy has been suspended successfully.
+ * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
* sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
* loopback_enabled: Set true if this phy has been loopbacked successfully.
* state: state of the PHY for management purposes
@@ -397,6 +398,7 @@ struct phy_device {
unsigned is_gigabit_capable:1;
unsigned has_fixups:1;
unsigned suspended:1;
+ unsigned suspended_by_mdio_bus:1;
unsigned sysfs_links:1;
unsigned loopback_enabled:1;
@@ -558,6 +560,7 @@ struct phy_driver {
/*
* Checks if the PHY generated an interrupt.
* For multi-PHY devices with shared PHY interrupt pin
+ * Set interrupt bits have to be cleared.
*/
int (*did_interrupt)(struct phy_device *phydev);
diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h
index 0bf9fddb8306..3b400b1919a9 100644
--- a/include/linux/platform_data/spi-omap2-mcspi.h
+++ b/include/linux/platform_data/spi-omap2-mcspi.h
@@ -11,6 +11,7 @@ struct omap2_mcspi_platform_config {
unsigned short num_cs;
unsigned int regs_offset;
unsigned int pin_dir:1;
+ size_t max_xfer_len;
};
struct omap2_mcspi_device_config {
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 276a03c24691..041bfa412aa0 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -24,7 +24,7 @@ struct platform_device {
int id;
bool id_auto;
struct device dev;
- u64 dma_mask;
+ u64 platform_dma_mask;
u32 num_resources;
struct resource *resource;
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index bbb68dba37cc..bc3f1aecaa19 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -322,4 +322,34 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
#endif
+/**
+ * migrate_disable - Prevent migration of the current task
+ *
+ * Maps to preempt_disable() which also disables preemption. Use
+ * migrate_disable() to annotate that the intent is to prevent migration,
+ * but not necessarily preemption.
+ *
+ * Can be invoked nested like preempt_disable() and needs the corresponding
+ * number of migrate_enable() invocations.
+ */
+static __always_inline void migrate_disable(void)
+{
+ preempt_disable();
+}
+
+/**
+ * migrate_enable - Allow migration of the current task
+ *
+ * Counterpart to migrate_disable().
+ *
+ * As migrate_disable() can be invoked nested, only the outermost invocation
+ * reenables migration.
+ *
+ * Currently mapped to preempt_enable().
+ */
+static __always_inline void migrate_enable(void)
+{
+ preempt_enable();
+}
+
#endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h
new file mode 100644
index 000000000000..0820edc0ab7d
--- /dev/null
+++ b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2019 Linaro Ltd. */
+
+#ifndef __QCOM_Q6V5_IPA_NOTIFY_H__
+#define __QCOM_Q6V5_IPA_NOTIFY_H__
+
+#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
+
+#include <linux/remoteproc.h>
+
+enum qcom_rproc_event {
+ MODEM_STARTING = 0, /* Modem is about to be started */
+ MODEM_RUNNING = 1, /* Startup complete; modem is operational */
+ MODEM_STOPPING = 2, /* Modem is about to shut down */
+ MODEM_CRASHED = 3, /* Modem has crashed (implies stopping) */
+ MODEM_OFFLINE = 4, /* Modem is now offline */
+ MODEM_REMOVING = 5, /* Modem is about to be removed */
+};
+
+typedef void (*qcom_ipa_notify_t)(void *data, enum qcom_rproc_event event);
+
+struct qcom_rproc_ipa_notify {
+ struct rproc_subdev subdev;
+
+ qcom_ipa_notify_t notify;
+ void *data;
+};
+
+/**
+ * qcom_add_ipa_notify_subdev() - Register IPA notification subdevice
+ * @rproc: rproc handle
+ * @ipa_notify: IPA notification subdevice handle
+ *
+ * Register the @ipa_notify subdevice with the @rproc so modem events
+ * can be sent to IPA when they occur.
+ *
+ * This is defined in "qcom_q6v5_ipa_notify.c".
+ */
+void qcom_add_ipa_notify_subdev(struct rproc *rproc,
+ struct qcom_rproc_ipa_notify *ipa_notify);
+
+/**
+ * qcom_remove_ipa_notify_subdev() - Remove IPA SSR subdevice
+ * @rproc: rproc handle
+ * @ipa_notify: IPA notification subdevice handle
+ *
+ * This is defined in "qcom_q6v5_ipa_notify.c".
+ */
+void qcom_remove_ipa_notify_subdev(struct rproc *rproc,
+ struct qcom_rproc_ipa_notify *ipa_notify);
+
+/**
+ * qcom_register_ipa_notify() - Register IPA notification function
+ * @rproc: Remote processor handle
+ * @notify: Non-null IPA notification callback function pointer
+ * @data: Data supplied to IPA notification callback function
+ *
+ * Return: 0 if successful, or a negative error code otherwise
+ *
+ * This is defined in "qcom_q6v5_mss.c".
+ */
+int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
+ void *data);
+/**
+ * qcom_deregister_ipa_notify() - Deregister IPA notification function
+ * @rproc: Remote processor handle
+ *
+ * This is defined in "qcom_q6v5_mss.c".
+ */
+void qcom_deregister_ipa_notify(struct rproc *rproc);
+
+#else /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
+
+struct qcom_rproc_ipa_notify { /* empty */ };
+
+#define qcom_add_ipa_notify_subdev(rproc, ipa_notify) /* no-op */
+#define qcom_remove_ipa_notify_subdev(rproc, ipa_notify) /* no-op */
+
+#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
+
+#endif /* !__QCOM_Q6V5_IPA_NOTIFY_H__ */
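A sketch of the intended registration flow for this interface, based only on
the declarations above (ipa_modem_notify() and the ipa pointer are
hypothetical driver pieces):

	#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>

	static void ipa_modem_notify(void *data, enum qcom_rproc_event event)
	{
		switch (event) {
		case MODEM_RUNNING:
			/* modem is up; (re)enable IPA traffic */
			break;
		case MODEM_STOPPING:
		case MODEM_CRASHED:
			/* quiesce IPA before the modem goes away */
			break;
		default:
			break;
		}
	}

	static int ipa_modem_init(struct rproc *rproc, void *ipa)
	{
		/* defined in "qcom_q6v5_mss.c" per the kernel-doc above */
		return qcom_register_ipa_notify(rproc, ipa_modem_notify, ipa);
	}

	static void ipa_modem_exit(struct rproc *rproc)
	{
		qcom_deregister_ipa_notify(rproc);
	}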
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index beb9a9da1699..70ebef866cc8 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -972,9 +972,9 @@ static inline int rhashtable_lookup_insert_key(
/**
* rhashtable_lookup_get_insert_key - lookup and insert object into hash table
* @ht: hash table
+ * @key: key
* @obj: pointer to hash head inside object
* @params: hash table parameters
- * @data: pointer to element data already in hashes
*
* Just like rhashtable_lookup_insert_key(), but this function returns the
* object if it exists, NULL if it does not and the insertion was successful,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5b50278c4bc8..21749b2cdc9b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3514,23 +3514,15 @@ int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff_head *queue,
unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
struct sk_buff_head *queue,
- unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
- int *off, int *err,
+ unsigned int flags, int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
- unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
- int *off, int *err);
+ unsigned int flags, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 19190c609282..fbafb353e9be 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -80,6 +80,7 @@
struct stmmac_mdio_bus_data {
unsigned int phy_mask;
+ unsigned int has_xpcs;
int *irqs;
int probed_phy_irq;
bool needs_reset;
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 1646c06989df..0ce4377545f8 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -46,9 +46,12 @@
#define CDC_NCM_DATA_ALTSETTING_NCM 1
#define CDC_NCM_DATA_ALTSETTING_MBIM 2
-/* CDC NCM subclass 3.2.1 */
+/* CDC NCM subclass 3.3.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
+/* CDC NCM subclass 3.3.2 */
+#define USB_CDC_NCM_NDP32_LENGTH_MIN 0x20
+
/* Maximum NTB length */
#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */
#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
@@ -84,7 +87,7 @@
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */
-#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */
+#define CDC_NCM_FLAG_PREFER_NTB32 0x08 /* prefer NDP32 over NDP16 */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
@@ -113,7 +116,11 @@ struct cdc_ncm_ctx {
u32 timer_interval;
u32 max_ndp_size;
- struct usb_cdc_ncm_ndp16 *delayed_ndp16;
+ u8 is_ndp16;
+ union {
+ struct usb_cdc_ncm_ndp16 *delayed_ndp16;
+ struct usb_cdc_ncm_ndp32 *delayed_ndp32;
+ };
u32 tx_timer_pending;
u32 tx_curr_frame_num;
@@ -150,6 +157,8 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
+int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
+int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset);
struct sk_buff *
cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags);
int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in);
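Given the new is_ndp16 flag and the NTB32 verify helpers declared above, the
receive path presumably dispatches on the negotiated NTB format; a hedged
sketch (foo_verify_nth() is hypothetical):

	static int foo_verify_nth(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
	{
		/* NTH16 and NTH32 headers differ, so pick the matching verifier */
		if (ctx->is_ndp16)
			return cdc_ncm_rx_verify_nth16(ctx, skb_in);

		return cdc_ncm_rx_verify_nth32(ctx, skb_in);
	}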
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4261d1c6e87b..e48554e6526c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -487,6 +487,19 @@ extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
*
* We queue the work to the CPU on which it was submitted, but if the CPU dies
* it can be processed by another CPU.
+ *
+ * Memory-ordering properties: If it returns %true, it guarantees that all
+ * stores preceding the call to queue_work() in program order will be visible
+ * to the CPU which executes @work by the time that work executes, e.g.,
+ *
+ * { x is initially 0 }
+ *
+ * CPU0 CPU1
+ *
+ * WRITE_ONCE(x, 1); [ @work is being executed ]
+ * r0 = queue_work(wq, work); r1 = READ_ONCE(x);
+ *
+ * Forbids: r0 == true && r1 == 0
*/
static inline bool queue_work(struct workqueue_struct *wq,
struct work_struct *work)
@@ -546,6 +559,9 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
* This puts a job in the kernel-global workqueue if it was not already
* queued and leaves it in the same position on the kernel-global
* workqueue otherwise.
+ *
+ * Shares the same memory-ordering properties as queue_work(), cf. the
+ * DocBook header of queue_work().
*/
static inline bool schedule_work(struct work_struct *work)
{
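The litmus test above is the familiar publish-then-queue pattern; a sketch of
code that relies on the documented guarantee (struct job and its users are
hypothetical):

	struct job {
		struct work_struct work;
		int payload;			/* written before queueing */
	};

	static void job_fn(struct work_struct *work)
	{
		struct job *j = container_of(work, struct job, work);

		/* If queue_work() returned true, payload == 42 is visible here */
		pr_info("payload=%d\n", j->payload);
	}

	static void submit(struct workqueue_struct *wq, struct job *j)
	{
		j->payload = 42;		/* store before the call ... */
		queue_work(wq, &j->work);	/* ... is visible to job_fn() */
	}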
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 71347a90a9d1..41337c7fc728 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -41,6 +41,7 @@ struct tc_action {
struct tc_cookie __rcu *act_cookie;
struct tcf_chain __rcu *goto_chain;
u32 tcfa_flags;
+ u8 hw_stats_type;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@@ -52,6 +53,9 @@ struct tc_action {
#define tcf_rate_est common.tcfa_rate_est
#define tcf_lock common.tcfa_lock
+#define TCA_ACT_HW_STATS_TYPE_ANY (TCA_ACT_HW_STATS_TYPE_IMMEDIATE | \
+ TCA_ACT_HW_STATS_TYPE_DELAYED)
+
/* Update lastuse only if needed, to avoid dirtying a cache line.
* We use a temp variable to avoid fetching jiffies twice.
*/
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 17e10fba2152..f42fdddecd41 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -27,7 +27,7 @@ struct unix_address {
refcount_t refcnt;
int len;
unsigned int hash;
- struct sockaddr_un name[0];
+ struct sockaddr_un name[];
};
struct unix_skb_parms {
@@ -42,7 +42,7 @@ struct unix_skb_parms {
} __randomize_layout;
struct scm_stat {
- u32 nr_fds;
+ atomic_t nr_fds;
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 3d56b026bb9e..dc2ce31a1f52 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -183,7 +183,7 @@ struct slave {
struct bond_up_slave {
unsigned int count;
struct rcu_head rcu;
- struct slave *arr[0];
+ struct slave *arr[];
};
/*
diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
index 8e4f831d2e52..5036c94c0503 100644
--- a/include/net/bpf_sk_storage.h
+++ b/include/net/bpf_sk_storage.h
@@ -10,14 +10,41 @@ void bpf_sk_storage_free(struct sock *sk);
extern const struct bpf_func_proto bpf_sk_storage_get_proto;
extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
+struct bpf_sk_storage_diag;
+struct sk_buff;
+struct nlattr;
+struct sock;
+
#ifdef CONFIG_BPF_SYSCALL
int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk);
+struct bpf_sk_storage_diag *
+bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs);
+void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag);
+int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
+ struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size);
#else
static inline int bpf_sk_storage_clone(const struct sock *sk,
struct sock *newsk)
{
return 0;
}
+static inline struct bpf_sk_storage_diag *
+bpf_sk_storage_diag_alloc(const struct nlattr *nla)
+{
+ return NULL;
+}
+static inline void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
+{
+}
+static inline int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
+ struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size)
+{
+ return 0;
+}
#endif
#endif /* _BPF_SK_STORAGE_H */
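The new diag interface appears to follow an alloc / put / free life cycle; a
hedged sketch of a dumper using it together with INET_DIAG_SK_BPF_STORAGES
added elsewhere in this patch (error handling trimmed, helper name
hypothetical):

	static int foo_dump_sk_storages(const struct nlattr *nla_stgs,
					struct sock *sk, struct sk_buff *skb)
	{
		struct bpf_sk_storage_diag *diag;
		unsigned int res_diag_size;
		int err;

		diag = bpf_sk_storage_diag_alloc(nla_stgs);
		if (IS_ERR_OR_NULL(diag))
			return PTR_ERR_OR_ZERO(diag);

		err = bpf_sk_storage_diag_put(diag, sk, skb,
					      INET_DIAG_SK_BPF_STORAGES,
					      &res_diag_size);
		bpf_sk_storage_diag_free(diag);
		return err;
	}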
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index 6dd2213c5eb2..ccc6e9df178b 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -90,7 +90,7 @@ struct dn_fib_table {
int (*flush)(struct dn_fib_table *t);
int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
- unsigned char data[0];
+ unsigned char data[];
};
#ifdef CONFIG_DECNET_ROUTER
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 7d3d84f0ef42..beeb81a532e3 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -540,6 +540,12 @@ struct dsa_switch_ops {
/*
* TC integration
*/
+ int (*cls_flower_add)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+ int (*cls_flower_del)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+ int (*cls_flower_stats)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
int (*port_mirror_add)(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress);
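A sketch of how a DSA driver might wire up the new cls_flower hooks (the
foo_* names are hypothetical; the ocelot helpers added later in this series
are a real user):

	static int foo_cls_flower_add(struct dsa_switch *ds, int port,
				      struct flow_cls_offload *cls, bool ingress)
	{
		/* translate cls->rule into a hardware TCAM entry here */
		return -EOPNOTSUPP;
	}

	static const struct dsa_switch_ops foo_switch_ops = {
		.cls_flower_add		= foo_cls_flower_add,
		/* .cls_flower_del, .cls_flower_stats, ... */
	};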
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 54e227e6b06a..a259050f84af 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -108,6 +108,7 @@ struct fib_rule_notifier_info {
[FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
[FRA_PRIORITY] = { .type = NLA_U32 }, \
[FRA_FWMARK] = { .type = NLA_U32 }, \
+ [FRA_TUN_ID] = { .type = NLA_U64 }, \
[FRA_FWMASK] = { .type = NLA_U32 }, \
[FRA_TABLE] = { .type = NLA_U32 }, \
[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 4e864c34a1b0..efd8d47f6997 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/netlink.h>
#include <net/flow_dissector.h>
#include <linux/rhashtable.h>
@@ -68,6 +69,10 @@ struct flow_match_enc_opts {
struct flow_dissector_key_enc_opts *key, *mask;
};
+struct flow_match_ct {
+ struct flow_dissector_key_ct *key, *mask;
+};
+
struct flow_rule;
void flow_rule_match_meta(const struct flow_rule *rule,
@@ -110,6 +115,8 @@ void flow_rule_match_enc_keyid(const struct flow_rule *rule,
struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
struct flow_match_enc_opts *out);
+void flow_rule_match_ct(const struct flow_rule *rule,
+ struct flow_match_ct *out);
enum flow_action_id {
FLOW_ACTION_ACCEPT = 0,
@@ -135,6 +142,7 @@ enum flow_action_id {
FLOW_ACTION_SAMPLE,
FLOW_ACTION_POLICE,
FLOW_ACTION_CT,
+ FLOW_ACTION_CT_METADATA,
FLOW_ACTION_MPLS_PUSH,
FLOW_ACTION_MPLS_POP,
FLOW_ACTION_MPLS_MANGLE,
@@ -154,6 +162,22 @@ enum flow_action_mangle_base {
FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};
+enum flow_action_hw_stats_type_bit {
+ FLOW_ACTION_HW_STATS_TYPE_IMMEDIATE_BIT,
+ FLOW_ACTION_HW_STATS_TYPE_DELAYED_BIT,
+};
+
+enum flow_action_hw_stats_type {
+ FLOW_ACTION_HW_STATS_TYPE_DISABLED = 0,
+ FLOW_ACTION_HW_STATS_TYPE_IMMEDIATE =
+ BIT(FLOW_ACTION_HW_STATS_TYPE_IMMEDIATE_BIT),
+ FLOW_ACTION_HW_STATS_TYPE_DELAYED =
+ BIT(FLOW_ACTION_HW_STATS_TYPE_DELAYED_BIT),
+ FLOW_ACTION_HW_STATS_TYPE_ANY =
+ FLOW_ACTION_HW_STATS_TYPE_IMMEDIATE |
+ FLOW_ACTION_HW_STATS_TYPE_DELAYED,
+};
+
typedef void (*action_destr)(void *priv);
struct flow_action_cookie {
@@ -168,6 +192,7 @@ void flow_action_cookie_destroy(struct flow_action_cookie *cookie);
struct flow_action_entry {
enum flow_action_id id;
+ enum flow_action_hw_stats_type hw_stats_type;
action_destr destructor;
void *destructor_priv;
union {
@@ -206,7 +231,13 @@ struct flow_action_entry {
struct { /* FLOW_ACTION_CT */
int action;
u16 zone;
+ struct nf_flowtable *flow_table;
} ct;
+ struct {
+ unsigned long cookie;
+ u32 mark;
+ u32 labels[4];
+ } ct_metadata;
struct { /* FLOW_ACTION_MPLS_PUSH */
u32 label;
__be16 proto;
@@ -229,7 +260,7 @@ struct flow_action_entry {
struct flow_action {
unsigned int num_entries;
- struct flow_action_entry entries[0];
+ struct flow_action_entry entries[];
};
static inline bool flow_action_has_entries(const struct flow_action *action)
@@ -249,7 +280,78 @@ static inline bool flow_offload_has_one_action(const struct flow_action *action)
}
#define flow_action_for_each(__i, __act, __actions) \
- for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i])
+ for (__i = 0, __act = &(__actions)->entries[0]; \
+ __i < (__actions)->num_entries; \
+ __act = &(__actions)->entries[++__i])
+
+static inline bool
+flow_action_mixed_hw_stats_types_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry *action_entry;
+ u8 uninitialized_var(last_hw_stats_type);
+ int i;
+
+ if (flow_offload_has_one_action(action))
+ return true;
+
+ flow_action_for_each(i, action_entry, action) {
+ if (i && action_entry->hw_stats_type != last_hw_stats_type) {
+ NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+ return false;
+ }
+ last_hw_stats_type = action_entry->hw_stats_type;
+ }
+ return true;
+}
+
+static inline const struct flow_action_entry *
+flow_action_first_entry_get(const struct flow_action *action)
+{
+ WARN_ON(!flow_action_has_entries(action));
+ return &action->entries[0];
+}
+
+static inline bool
+__flow_action_hw_stats_types_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack,
+ bool check_allow_bit,
+ enum flow_action_hw_stats_type_bit allow_bit)
+{
+ const struct flow_action_entry *action_entry;
+
+ if (!flow_action_has_entries(action))
+ return true;
+ if (!flow_action_mixed_hw_stats_types_check(action, extack))
+ return false;
+ action_entry = flow_action_first_entry_get(action);
+ if (!check_allow_bit &&
+ action_entry->hw_stats_type != FLOW_ACTION_HW_STATS_TYPE_ANY) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
+ return false;
+ } else if (check_allow_bit &&
+ !(action_entry->hw_stats_type & BIT(allow_bit))) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
+ return false;
+ }
+ return true;
+}
+
+static inline bool
+flow_action_hw_stats_types_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack,
+ enum flow_action_hw_stats_type_bit allow_bit)
+{
+ return __flow_action_hw_stats_types_check(action, extack,
+ true, allow_bit);
+}
+
+static inline bool
+flow_action_basic_hw_stats_types_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack)
+{
+ return __flow_action_hw_stats_types_check(action, extack, false, 0);
+}
struct flow_rule {
struct flow_match match;
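Drivers are expected to call one of the two checkers above early in their
flower offload path; a sketch for a driver that can only report immediate
stats (foo_* names hypothetical):

	static int foo_replace_flower(struct flow_cls_offload *f)
	{
		struct flow_rule *rule = flow_cls_offload_flow_rule(f);

		if (!flow_action_hw_stats_types_check(&rule->action,
						      f->common.extack,
						      FLOW_ACTION_HW_STATS_TYPE_IMMEDIATE_BIT))
			return -EOPNOTSUPP;

		/* ... program rule->match and rule->action ... */
		return 0;
	}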
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 34c4436fd18f..a7ce00af6c44 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -52,7 +52,7 @@ struct ip_options {
unsigned char router_alert;
unsigned char cipso;
unsigned char __pad2;
- unsigned char __data[0];
+ unsigned char __data[];
};
struct ip_options_rcu {
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index fd60a8ac02ee..6ec26e4d7f11 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -198,7 +198,7 @@ struct fib6_info {
struct rcu_head rcu;
struct nexthop *nh;
- struct fib6_nh fib6_nh[0];
+ struct fib6_nh fib6_nh[];
};
struct rt6_info {
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index b69c16cbbf71..f7543c095b33 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -16,7 +16,7 @@ struct route_info {
reserved_h:3;
#endif
__be32 lifetime;
- __u8 prefix[0]; /* 0,8 or 16 */
+ __u8 prefix[]; /* 0,8 or 16 */
};
#include <net/addrconf.h>
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 6a1ae49809de..dabe398bee4c 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -153,7 +153,7 @@ struct fib_info {
bool nh_updated;
struct nexthop *nh;
struct rcu_head rcu;
- struct fib_nh fib_nh[0];
+ struct fib_nh fib_nh[];
};
@@ -250,7 +250,7 @@ struct fib_table {
int tb_num_default;
struct rcu_head rcu;
unsigned long *tb_data;
- unsigned long __data[0];
+ unsigned long __data[];
};
struct fib_dump_filter {
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 5d6c5b1fc695..b5e6edf74b70 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -30,7 +30,7 @@ struct lwtunnel_state {
int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*orig_input)(struct sk_buff *);
struct rcu_head rcu;
- __u8 data[0];
+ __u8 data[];
};
struct lwtunnel_encap_ops {
diff --git a/include/net/mip6.h b/include/net/mip6.h
index f1c28971c362..67cd7e50804c 100644
--- a/include/net/mip6.h
+++ b/include/net/mip6.h
@@ -25,7 +25,7 @@ struct ip6_mh {
__u8 ip6mh_reserved;
__u16 ip6mh_cksum;
/* Followed by type specific messages */
- __u8 data[0];
+ __u8 data[];
} __packed;
#define IP6_MH_TYPE_BRR 0 /* Binding Refresh Request */
diff --git a/include/net/mld.h b/include/net/mld.h
index b0f5b3105ef0..496bddb59942 100644
--- a/include/net/mld.h
+++ b/include/net/mld.h
@@ -24,12 +24,12 @@ struct mld2_grec {
__u8 grec_auxwords;
__be16 grec_nsrcs;
struct in6_addr grec_mca;
- struct in6_addr grec_src[0];
+ struct in6_addr grec_src[];
};
struct mld2_report {
struct icmp6hdr mld2r_hdr;
- struct mld2_grec mld2r_grec[0];
+ struct mld2_grec mld2r_grec[];
};
#define mld2r_type mld2r_hdr.icmp6_type
@@ -55,7 +55,7 @@ struct mld2_query {
#endif
__u8 mld2q_qqic;
__be16 mld2q_nsrcs;
- struct in6_addr mld2q_srcs[0];
+ struct in6_addr mld2q_srcs[];
};
#define mld2q_type mld2q_hdr.icmp6_type
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
index 6b4759eae158..9deb3a3735da 100644
--- a/include/net/mpls_iptunnel.h
+++ b/include/net/mpls_iptunnel.h
@@ -11,7 +11,7 @@ struct mpls_iptunnel_encap {
u8 ttl_propagate;
u8 default_ttl;
u8 reserved1;
- u32 label[0];
+ u32 label[];
};
static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b5ebeb3b0de0..1c61aeb3a1c0 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -80,12 +80,12 @@ extern struct neigh_table nd_tbl;
struct nd_msg {
struct icmp6hdr icmph;
struct in6_addr target;
- __u8 opt[0];
+ __u8 opt[];
};
struct rs_msg {
struct icmp6hdr icmph;
- __u8 opt[0];
+ __u8 opt[];
};
struct ra_msg {
@@ -98,7 +98,7 @@ struct rd_msg {
struct icmp6hdr icmph;
struct in6_addr target;
struct in6_addr dest;
- __u8 opt[0];
+ __u8 opt[];
};
struct nd_opt_hdr {
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 8ec77bfdc1a4..e1476775769c 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -174,7 +174,7 @@ struct pneigh_entry {
struct net_device *dev;
u8 flags;
u8 protocol;
- u8 key[0];
+ u8 key[];
};
/*
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index e0f709d9d547..6890f1ca3e31 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -16,6 +16,29 @@ struct nf_flow_rule;
struct flow_offload;
enum flow_offload_tuple_dir;
+struct nf_flow_key {
+ struct flow_dissector_key_meta meta;
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_basic basic;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ struct flow_dissector_key_ipv6_addrs ipv6;
+ };
+ struct flow_dissector_key_tcp tcp;
+ struct flow_dissector_key_ports tp;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct nf_flow_match {
+ struct flow_dissector dissector;
+ struct nf_flow_key key;
+ struct nf_flow_key mask;
+};
+
+struct nf_flow_rule {
+ struct nf_flow_match match;
+ struct flow_rule *rule;
+};
+
struct nf_flowtable_type {
struct list_head list;
int family;
@@ -44,6 +67,7 @@ struct nf_flowtable {
struct delayed_work gc_work;
unsigned int flags;
struct flow_block flow_block;
+ struct mutex flow_block_lock; /* Guards flow_block */
possible_net_t net;
};
@@ -129,10 +153,18 @@ struct nf_flow_route {
struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);
+int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv);
+void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv);
+
int flow_offload_route_init(struct flow_offload *flow,
const struct nf_flow_route *route);
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+void flow_offload_refresh(struct nf_flowtable *flow_table,
+ struct flow_offload *flow);
+
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
void nf_flow_table_cleanup(struct net_device *dev);
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 08b98414d94e..154b8f01499b 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -101,6 +101,7 @@ struct netns_ipv4 {
int sysctl_ip_fwd_use_pmtu;
int sysctl_ip_fwd_update_priority;
int sysctl_ip_nonlocal_bind;
+ int sysctl_ip_autobind_reuse;
/* Shall we try to damage output packets if routing dev changes? */
int sysctl_ip_dynaddr;
int sysctl_ip_early_demux;
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 331ebbc94fe7..c440ccc861fc 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -73,7 +73,7 @@ struct nh_group {
u16 num_nh;
bool mpath;
bool has_v4;
- struct nh_grp_entry nh_entries[0];
+ struct nh_grp_entry nh_entries[];
};
struct nexthop {
diff --git a/include/net/pie.h b/include/net/pie.h
index fd5a37cb7993..3fe2361e03b4 100644
--- a/include/net/pie.h
+++ b/include/net/pie.h
@@ -8,7 +8,7 @@
#include <net/inet_ecn.h>
#include <net/pkt_sched.h>
-#define MAX_PROB U64_MAX
+#define MAX_PROB (U64_MAX >> BITS_PER_BYTE)
#define DTIME_INVALID U64_MAX
#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
@@ -38,16 +38,15 @@ struct pie_params {
/**
* struct pie_vars - contains pie variables
- * @qdelay: current queue delay
- * @qdelay_old: queue delay in previous qdelay calculation
- * @burst_time: burst time allowance
- * @dq_tstamp: timestamp at which dq rate was last calculated
- * @prob: drop probability
- * @accu_prob: accumulated drop probability
- * @dq_count: number of bytes dequeued in a measurement cycle
- * @avg_dq_rate: calculated average dq rate
- * @qlen_old: queue length during previous qdelay calculation
- * @accu_prob_overflows: number of times accu_prob overflows
+ * @qdelay: current queue delay
+ * @qdelay_old: queue delay in previous qdelay calculation
+ * @burst_time: burst time allowance
+ * @dq_tstamp: timestamp at which dq rate was last calculated
+ * @prob: drop probability
+ * @accu_prob: accumulated drop probability
+ * @dq_count: number of bytes dequeued in a measurement cycle
+ * @avg_dq_rate: calculated average dq rate
+ * @backlog_old: queue backlog during previous qdelay calculation
*/
struct pie_vars {
psched_time_t qdelay;
@@ -58,8 +57,7 @@ struct pie_vars {
u64 accu_prob;
u64 dq_count;
u32 avg_dq_rate;
- u32 qlen_old;
- u8 accu_prob_overflows;
+ u32 backlog_old;
};
/**
@@ -107,7 +105,6 @@ static inline void pie_vars_init(struct pie_vars *vars)
vars->accu_prob = 0;
vars->dq_count = DQCOUNT_INVALID;
vars->avg_dq_rate = 0;
- vars->accu_prob_overflows = 0;
}
static inline struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb)
@@ -127,12 +124,12 @@ static inline void pie_set_enqueue_time(struct sk_buff *skb)
}
bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
- struct pie_vars *vars, u32 qlen, u32 packet_size);
+ struct pie_vars *vars, u32 backlog, u32 packet_size);
void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
- struct pie_vars *vars, u32 qlen);
+ struct pie_vars *vars, u32 backlog);
void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
- u32 qlen);
+ u32 backlog);
#endif
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 53946b509b51..dbc89452f90b 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -72,6 +72,10 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode);
+int tcf_classify_ingress(struct sk_buff *skb,
+ const struct tcf_block *ingress_block,
+ const struct tcf_proto *tp, struct tcf_result *res,
+ bool compat_mode);
#else
static inline bool tcf_block_shared(struct tcf_block *block)
@@ -133,6 +137,15 @@ static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
{
return TC_ACT_UNSPEC;
}
+
+static inline int tcf_classify_ingress(struct sk_buff *skb,
+ const struct tcf_block *ingress_block,
+ const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
+{
+ return TC_ACT_UNSPEC;
+}
+
#endif
static inline unsigned long
@@ -881,4 +894,19 @@ struct tc_tbf_qopt_offload {
};
};
+enum tc_fifo_command {
+ TC_FIFO_REPLACE,
+ TC_FIFO_DESTROY,
+ TC_FIFO_STATS,
+};
+
+struct tc_fifo_qopt_offload {
+ enum tc_fifo_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_qopt_offload_stats stats;
+ };
+};
+
#endif
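A sketch of how a driver's ndo_setup_tc() handler might dispatch the new FIFO
offload commands (the foo_fifo_* helpers are hypothetical):

	static int foo_setup_tc_fifo(struct net_device *dev,
				     struct tc_fifo_qopt_offload *qopt)
	{
		switch (qopt->command) {
		case TC_FIFO_REPLACE:
			return foo_fifo_replace(dev, qopt->handle, qopt->parent);
		case TC_FIFO_DESTROY:
			return foo_fifo_destroy(dev, qopt->handle);
		case TC_FIFO_STATS:
			return foo_fifo_stats(dev, qopt->handle, &qopt->stats);
		default:
			return -EOPNOTSUPP;
		}
	}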
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 6a70845bd9ab..20d2c6419612 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -181,7 +181,7 @@ struct tc_taprio_qopt_offload {
u64 cycle_time_extension;
size_t num_entries;
- struct tc_taprio_sched_entry entries[0];
+ struct tc_taprio_sched_entry entries[];
};
/* Reference counting */
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 151208704ed2..bcdf98d21094 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -1269,6 +1269,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
*/
struct mini_Qdisc {
struct tcf_proto *filter_list;
+ struct tcf_block *block;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
struct rcu_head rcu;
@@ -1295,6 +1296,8 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq);
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+ struct tcf_block *block);
static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 314a2fa21d6b..fb42c90348d3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -326,7 +326,7 @@ struct sctp_cookie {
* the association TCB is re-constructed from the cookie.
*/
__u32 raw_addr_list_len;
- struct sctp_init_chunk peer_init[0];
+ struct sctp_init_chunk peer_init[];
};
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 3ecaa15d1850..505f1e18e9bf 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -24,7 +24,7 @@ struct sock_reuseport {
unsigned int bind_inany:1;
unsigned int has_conns:1;
struct bpf_prog __rcu *prog; /* optional BPF sock selector */
- struct sock *socks[0]; /* array of sock pointers */
+ struct sock *socks[]; /* array of sock pointers */
};
extern int reuseport_alloc(struct sock *sk, bool bind_inany);
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
index a8b156402873..79654bcb9a29 100644
--- a/include/net/tc_act/tc_ct.h
+++ b/include/net/tc_act/tc_ct.h
@@ -25,6 +25,9 @@ struct tcf_ct_params {
u16 ct_action;
struct rcu_head rcu;
+
+ struct tcf_ct_flow_table *ct_ft;
+ struct nf_flowtable *nf_ft;
};
struct tcf_ct {
@@ -48,11 +51,27 @@ static inline int tcf_ct_action(const struct tc_action *a)
return to_ct_params(a)->ct_action;
}
+static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+{
+ return to_ct_params(a)->nf_ft;
+}
+
#else
static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
+static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+{
+ return NULL;
+}
#endif /* CONFIG_NF_CONNTRACK */
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie);
+#else
+static inline void
+tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) { }
+#endif
+
static inline bool is_tcf_ct(const struct tc_action *a)
{
#if defined(CONFIG_NET_CLS_ACT) && IS_ENABLED(CONFIG_NF_CONNTRACK)
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 068f96b1a83e..007b584cc431 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -11,6 +11,66 @@
#include <linux/regmap.h>
#include <net/dsa.h>
+/* Port Group IDs (PGID) are masks of destination ports.
+ *
+ * For L2 forwarding, the switch performs 3 lookups in the PGID table for each
+ * frame, and forwards the frame to the ports that are present in the logical
+ * AND of all 3 PGIDs.
+ *
+ * These PGID lookups are:
+ * - In one of PGID[0-63]: for the destination masks. There are 2 paths by
+ * which the switch selects a destination PGID:
+ * - The {DMAC, VID} is present in the MAC table. In that case, the
+ * destination PGID is given by the DEST_IDX field of the MAC table entry
+ * that matched.
+ * - The {DMAC, VID} is not present in the MAC table (it is unknown). The
+ * frame is classified as being either unicast, multicast or broadcast,
+ * and according to that, the destination PGID is chosen as being the
+ * value contained by ANA_FLOODING_FLD_UNICAST,
+ * ANA_FLOODING_FLD_MULTICAST or ANA_FLOODING_FLD_BROADCAST.
+ * The destination PGID can be a unicast set: the first PGIDs, 0 to
+ * ocelot->num_phys_ports - 1, or a multicast set: the PGIDs from
+ * ocelot->num_phys_ports to 63. By convention, a unicast PGID corresponds to
+ * a physical port and has a single bit set in the destination ports mask:
+ * the one corresponding to the port number itself. In contrast, a multicast
+ * PGID may have more than one bit set in the destination
+ * ports mask.
+ * - In one of PGID[64-79]: for the aggregation mask. The switch classifier
+ * dissects each frame and generates a 4-bit Link Aggregation Code which is
+ * used for this second PGID table lookup. The goal of link aggregation is to
+ * hash multiple flows within the same LAG on to different destination ports.
+ * The first lookup will result in a PGID with all the LAG members present in
+ * the destination ports mask, and the second lookup, by Link Aggregation
+ * Code, will ensure that each flow gets forwarded only to a single port out
+ * of that mask (there are no duplicates).
+ * - In one of PGID[80-90]: for the source mask. The third time, the PGID table
+ * is indexed with the ingress port (plus 80). These PGIDs answer the
+ * question "is port i allowed to forward traffic to port j?" If yes, then
+ * BIT(j) of PGID 80+i will be found set. The third PGID lookup can be used
+ * to enforce the L2 forwarding matrix imposed by e.g. a Linux bridge.
+ */
+
+/* Reserve some destination PGIDs at the end of the range:
+ * PGID_CPU: used for whitelisting certain MAC addresses, such as the addresses
+ * of the switch port net devices, towards the CPU port module.
+ * PGID_UC: the flooding destinations for unknown unicast traffic.
+ * PGID_MC: the flooding destinations for broadcast and non-IP multicast
+ * traffic.
+ * PGID_MCIPV4: the flooding destinations for IPv4 multicast traffic.
+ * PGID_MCIPV6: the flooding destinations for IPv6 multicast traffic.
+ */
+#define PGID_CPU 59
+#define PGID_UC 60
+#define PGID_MC 61
+#define PGID_MCIPV4 62
+#define PGID_MCIPV6 63
+
+/* Aggregation PGIDs, one per Link Aggregation Code */
+#define PGID_AGGR 64
+
+/* Source PGIDs, one per physical port */
+#define PGID_SRC 80
+
#define IFH_INJ_BYPASS BIT(31)
#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
@@ -402,10 +462,14 @@ enum ocelot_tag_prefix {
struct ocelot;
struct ocelot_ops {
- void (*pcs_init)(struct ocelot *ocelot, int port);
int (*reset)(struct ocelot *ocelot);
};
+struct ocelot_acl_block {
+ struct list_head rules;
+ int count;
+};
+
struct ocelot_port {
struct ocelot *ocelot;
@@ -447,14 +511,22 @@ struct ocelot {
/* Keep track of the vlan port masks */
u32 vlan_mask[VLAN_N_VID];
+ /* In tables like ANA:PORT and the ANA:PGID:PGID mask,
+ * the CPU is located after the physical ports (at the
+ * num_phys_ports index).
+ */
u8 num_phys_ports;
- u8 num_cpu_ports;
- u8 cpu;
u32 *lags;
struct list_head multicast;
+ struct ocelot_acl_block acl_block;
+
+ const struct vcap_field *vcap_is2_keys;
+ const struct vcap_field *vcap_is2_actions;
+ const struct vcap_props *vcap;
+
/* Workqueue to check statistics for overflow with its lock */
struct mutex stats_lock;
u64 *stats;
@@ -469,8 +541,6 @@ struct ocelot {
struct mutex ptp_lock;
/* Protects the PTP clock */
spinlock_t ptp_clock_lock;
-
- void (*port_pcs_init)(struct ocelot_port *port);
};
#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
@@ -500,9 +570,9 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields);
struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res);
-void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction);
+void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction);
int ocelot_init(struct ocelot *ocelot);
void ocelot_deinit(struct ocelot *ocelot);
void ocelot_init_port(struct ocelot *ocelot, int port);
@@ -541,5 +611,11 @@ int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
struct sk_buff *skb);
void ocelot_get_txtstamp(struct ocelot *ocelot);
+int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress);
+int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress);
+int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress);
#endif
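To make the three-stage PGID lookup above concrete, the effective destination
set is the bitwise AND of the three masks; a worked sketch with illustrative
values for a 4-port switch:

	/* DMAC lookup hit a multicast PGID covering ports 1-3 (0b1110),
	 * the 4-bit aggregation code selected PGID_AGGR mask 0b1010, and
	 * the source mask for ingress port 0 (PGID_SRC + 0) allows all
	 * ports (0b1111).
	 */
	u32 dest = 0xe;			/* PGID[0-63] lookup   */
	u32 aggr = 0xa;			/* PGID[64-79] lookup  */
	u32 src  = 0xf;			/* PGID[80-90] lookup  */
	u32 fwd  = dest & aggr & src;	/* == 0xa: ports 1 and 3 */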
diff --git a/include/soc/mscc/ocelot_dev.h b/include/soc/mscc/ocelot_dev.h
index 0a50d53bbd3f..7c08437061fc 100644
--- a/include/soc/mscc/ocelot_dev.h
+++ b/include/soc/mscc/ocelot_dev.h
@@ -74,7 +74,7 @@
#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2)
-#define DEV_MAC_TAGS_CFG_PB_ENA BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1)
#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
#define DEV_MAC_ADV_CHK_CFG 0x2c
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
new file mode 100644
index 000000000000..5748373ab4d3
--- /dev/null
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _OCELOT_VCAP_H_
+#define _OCELOT_VCAP_H_
+
+/* =================================================================
+ * VCAP Common
+ * =================================================================
+ */
+
+enum {
+ /* VCAP_IS1, */
+ VCAP_IS2,
+ /* VCAP_ES0, */
+};
+
+struct vcap_props {
+ u16 tg_width; /* Type-group width (in bits) */
+ u16 sw_count; /* Sub word count */
+ u16 entry_count; /* Entry count */
+ u16 entry_words; /* Number of entry words */
+ u16 entry_width; /* Entry width (in bits) */
+ u16 action_count; /* Action count */
+ u16 action_words; /* Number of action words */
+ u16 action_width; /* Action width (in bits) */
+ u16 action_type_width; /* Action type width (in bits) */
+ struct {
+ u16 width; /* Action type width (in bits) */
+ u16 count; /* Action type sub word count */
+ } action_table[2];
+ u16 counter_words; /* Number of counter words */
+ u16 counter_width; /* Counter width (in bits) */
+};
+
+/* VCAP Type-Group values */
+#define VCAP_TG_NONE 0 /* Entry is invalid */
+#define VCAP_TG_FULL 1 /* Full entry */
+#define VCAP_TG_HALF 2 /* Half entry */
+#define VCAP_TG_QUARTER 3 /* Quarter entry */
+
+/* =================================================================
+ * VCAP IS2
+ * =================================================================
+ */
+
+/* IS2 half key types */
+#define IS2_TYPE_ETYPE 0
+#define IS2_TYPE_LLC 1
+#define IS2_TYPE_SNAP 2
+#define IS2_TYPE_ARP 3
+#define IS2_TYPE_IP_UDP_TCP 4
+#define IS2_TYPE_IP_OTHER 5
+#define IS2_TYPE_IPV6 6
+#define IS2_TYPE_OAM 7
+#define IS2_TYPE_SMAC_SIP6 8
+#define IS2_TYPE_ANY 100 /* Pseudo type */
+
+/* IS2 half key type mask for matching any IP */
+#define IS2_TYPE_MASK_IP_ANY 0xe
+
+enum {
+ IS2_ACTION_TYPE_NORMAL,
+ IS2_ACTION_TYPE_SMAC_SIP,
+ IS2_ACTION_TYPE_MAX,
+};
+
+/* IS2 MASK_MODE values */
+#define IS2_ACT_MASK_MODE_NONE 0
+#define IS2_ACT_MASK_MODE_FILTER 1
+#define IS2_ACT_MASK_MODE_POLICY 2
+#define IS2_ACT_MASK_MODE_REDIR 3
+
+/* IS2 REW_OP values */
+#define IS2_ACT_REW_OP_NONE 0
+#define IS2_ACT_REW_OP_PTP_ONE 2
+#define IS2_ACT_REW_OP_PTP_TWO 3
+#define IS2_ACT_REW_OP_SPECIAL 8
+#define IS2_ACT_REW_OP_PTP_ORG 9
+#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_1 (IS2_ACT_REW_OP_PTP_ONE | (1 << 3))
+#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_2 (IS2_ACT_REW_OP_PTP_ONE | (2 << 3))
+#define IS2_ACT_REW_OP_PTP_ONE_ADD_DELAY (IS2_ACT_REW_OP_PTP_ONE | (1 << 5))
+#define IS2_ACT_REW_OP_PTP_ONE_ADD_SUB BIT(7)
+
+#define VCAP_PORT_WIDTH 4
+
+/* IS2 quarter key - SMAC_SIP4 */
+#define IS2_QKO_IGR_PORT 0
+#define IS2_QKL_IGR_PORT VCAP_PORT_WIDTH
+#define IS2_QKO_L2_SMAC (IS2_QKO_IGR_PORT + IS2_QKL_IGR_PORT)
+#define IS2_QKL_L2_SMAC 48
+#define IS2_QKO_L3_IP4_SIP (IS2_QKO_L2_SMAC + IS2_QKL_L2_SMAC)
+#define IS2_QKL_L3_IP4_SIP 32
+
+enum vcap_is2_half_key_field {
+ /* Common */
+ VCAP_IS2_TYPE,
+ VCAP_IS2_HK_FIRST,
+ VCAP_IS2_HK_PAG,
+ VCAP_IS2_HK_RSV1,
+ VCAP_IS2_HK_IGR_PORT_MASK,
+ VCAP_IS2_HK_RSV2,
+ VCAP_IS2_HK_HOST_MATCH,
+ VCAP_IS2_HK_L2_MC,
+ VCAP_IS2_HK_L2_BC,
+ VCAP_IS2_HK_VLAN_TAGGED,
+ VCAP_IS2_HK_VID,
+ VCAP_IS2_HK_DEI,
+ VCAP_IS2_HK_PCP,
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ VCAP_IS2_HK_L2_DMAC,
+ VCAP_IS2_HK_L2_SMAC,
+ /* MAC_ETYPE (TYPE=000) */
+ VCAP_IS2_HK_MAC_ETYPE_ETYPE,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
+ /* MAC_LLC (TYPE=001) */
+ VCAP_IS2_HK_MAC_LLC_DMAC,
+ VCAP_IS2_HK_MAC_LLC_SMAC,
+ VCAP_IS2_HK_MAC_LLC_L2_LLC,
+ /* MAC_SNAP (TYPE=010) */
+ VCAP_IS2_HK_MAC_SNAP_SMAC,
+ VCAP_IS2_HK_MAC_SNAP_DMAC,
+ VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
+ /* MAC_ARP (TYPE=011) */
+ VCAP_IS2_HK_MAC_ARP_SMAC,
+ VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK,
+ VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK,
+ VCAP_IS2_HK_MAC_ARP_LEN_OK,
+ VCAP_IS2_HK_MAC_ARP_TARGET_MATCH,
+ VCAP_IS2_HK_MAC_ARP_SENDER_MATCH,
+ VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN,
+ VCAP_IS2_HK_MAC_ARP_OPCODE,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP,
+ VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ VCAP_IS2_HK_IP4,
+ VCAP_IS2_HK_L3_FRAGMENT,
+ VCAP_IS2_HK_L3_FRAG_OFS_GT0,
+ VCAP_IS2_HK_L3_OPTIONS,
+ VCAP_IS2_HK_IP4_L3_TTL_GT0,
+ VCAP_IS2_HK_L3_TOS,
+ VCAP_IS2_HK_L3_IP4_DIP,
+ VCAP_IS2_HK_L3_IP4_SIP,
+ VCAP_IS2_HK_DIP_EQ_SIP,
+ /* IP4_TCP_UDP (TYPE=100) */
+ VCAP_IS2_HK_TCP,
+ VCAP_IS2_HK_L4_SPORT,
+ VCAP_IS2_HK_L4_DPORT,
+ VCAP_IS2_HK_L4_RNG,
+ VCAP_IS2_HK_L4_SPORT_EQ_DPORT,
+ VCAP_IS2_HK_L4_SEQUENCE_EQ0,
+ VCAP_IS2_HK_L4_URG,
+ VCAP_IS2_HK_L4_ACK,
+ VCAP_IS2_HK_L4_PSH,
+ VCAP_IS2_HK_L4_RST,
+ VCAP_IS2_HK_L4_SYN,
+ VCAP_IS2_HK_L4_FIN,
+ VCAP_IS2_HK_L4_1588_DOM,
+ VCAP_IS2_HK_L4_1588_VER,
+ /* IP4_OTHER (TYPE=101) */
+ VCAP_IS2_HK_IP4_L3_PROTO,
+ VCAP_IS2_HK_L3_PAYLOAD,
+ /* IP6_STD (TYPE=110) */
+ VCAP_IS2_HK_IP6_L3_TTL_GT0,
+ VCAP_IS2_HK_IP6_L3_PROTO,
+ VCAP_IS2_HK_L3_IP6_SIP,
+ /* OAM (TYPE=111) */
+ VCAP_IS2_HK_OAM_MEL_FLAGS,
+ VCAP_IS2_HK_OAM_VER,
+ VCAP_IS2_HK_OAM_OPCODE,
+ VCAP_IS2_HK_OAM_FLAGS,
+ VCAP_IS2_HK_OAM_MEPID,
+ VCAP_IS2_HK_OAM_CCM_CNTS_EQ0,
+ VCAP_IS2_HK_OAM_IS_Y1731,
+};
+
+struct vcap_field {
+ int offset;
+ int length;
+};
+
+enum vcap_is2_action_field {
+ VCAP_IS2_ACT_HIT_ME_ONCE,
+ VCAP_IS2_ACT_CPU_COPY_ENA,
+ VCAP_IS2_ACT_CPU_QU_NUM,
+ VCAP_IS2_ACT_MASK_MODE,
+ VCAP_IS2_ACT_MIRROR_ENA,
+ VCAP_IS2_ACT_LRN_DIS,
+ VCAP_IS2_ACT_POLICE_ENA,
+ VCAP_IS2_ACT_POLICE_IDX,
+ VCAP_IS2_ACT_POLICE_VCAP_ONLY,
+ VCAP_IS2_ACT_PORT_MASK,
+ VCAP_IS2_ACT_REW_OP,
+ VCAP_IS2_ACT_SMAC_REPLACE_ENA,
+ VCAP_IS2_ACT_RSV,
+ VCAP_IS2_ACT_ACL_ID,
+ VCAP_IS2_ACT_HIT_CNT,
+};
+
+#endif /* _OCELOT_VCAP_H_ */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f0e4f36f83bf..8a2266676b2d 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1157,7 +1157,7 @@ struct snd_soc_pcm_runtime {
((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
(i)++)
#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
- for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);)
+ for (; (--(i) >= 0) && ((dai) = rtd->codec_dais[i]);)
void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 906e9f2752db..8e98ced0963b 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -73,7 +73,7 @@ struct bpf_insn {
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
- __u8 data[0]; /* Arbitrary size */
+ __u8 data[]; /* Arbitrary size */
};
struct bpf_cgroup_storage_key {
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index be2a2948f452..dfdffc42e87d 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -187,6 +187,7 @@ enum devlink_port_flavour {
* for the PCI VF. It is an internal
* port that faces the PCI VF.
*/
+ DEVLINK_PORT_FLAVOUR_VIRTUAL, /* Any virtual port facing the user. */
};
enum devlink_param_cmode {
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 2df8ceca1f9b..6622912c2342 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -272,9 +272,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 41
+#define DM_VERSION_MINOR 42
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2019-09-16)"
+#define DM_VERSION_EXTRA "-ioctl (2020-02-27)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 7e0b460f872c..c7c7a1a550af 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -24,6 +24,14 @@ enum {
ETHTOOL_MSG_DEBUG_SET,
ETHTOOL_MSG_WOL_GET,
ETHTOOL_MSG_WOL_SET,
+ ETHTOOL_MSG_FEATURES_GET,
+ ETHTOOL_MSG_FEATURES_SET,
+ ETHTOOL_MSG_PRIVFLAGS_GET,
+ ETHTOOL_MSG_PRIVFLAGS_SET,
+ ETHTOOL_MSG_RINGS_GET,
+ ETHTOOL_MSG_RINGS_SET,
+ ETHTOOL_MSG_CHANNELS_GET,
+ ETHTOOL_MSG_CHANNELS_SET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -43,6 +51,15 @@ enum {
ETHTOOL_MSG_DEBUG_NTF,
ETHTOOL_MSG_WOL_GET_REPLY,
ETHTOOL_MSG_WOL_NTF,
+ ETHTOOL_MSG_FEATURES_GET_REPLY,
+ ETHTOOL_MSG_FEATURES_SET_REPLY,
+ ETHTOOL_MSG_FEATURES_NTF,
+ ETHTOOL_MSG_PRIVFLAGS_GET_REPLY,
+ ETHTOOL_MSG_PRIVFLAGS_NTF,
+ ETHTOOL_MSG_RINGS_GET_REPLY,
+ ETHTOOL_MSG_RINGS_NTF,
+ ETHTOOL_MSG_CHANNELS_GET_REPLY,
+ ETHTOOL_MSG_CHANNELS_NTF,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -228,6 +245,71 @@ enum {
ETHTOOL_A_WOL_MAX = __ETHTOOL_A_WOL_CNT - 1
};
+/* FEATURES */
+
+enum {
+ ETHTOOL_A_FEATURES_UNSPEC,
+ ETHTOOL_A_FEATURES_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_FEATURES_HW, /* bitset */
+ ETHTOOL_A_FEATURES_WANTED, /* bitset */
+ ETHTOOL_A_FEATURES_ACTIVE, /* bitset */
+ ETHTOOL_A_FEATURES_NOCHANGE, /* bitset */
+
+ /* add new constants above here */
+ __ETHTOOL_A_FEATURES_CNT,
+ ETHTOOL_A_FEATURES_MAX = __ETHTOOL_A_FEATURES_CNT - 1
+};
+
+/* PRIVFLAGS */
+
+enum {
+ ETHTOOL_A_PRIVFLAGS_UNSPEC,
+ ETHTOOL_A_PRIVFLAGS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PRIVFLAGS_FLAGS, /* bitset */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PRIVFLAGS_CNT,
+ ETHTOOL_A_PRIVFLAGS_MAX = __ETHTOOL_A_PRIVFLAGS_CNT - 1
+};
+
+/* RINGS */
+
+enum {
+ ETHTOOL_A_RINGS_UNSPEC,
+ ETHTOOL_A_RINGS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_RINGS_RX_MAX, /* u32 */
+ ETHTOOL_A_RINGS_RX_MINI_MAX, /* u32 */
+ ETHTOOL_A_RINGS_RX_JUMBO_MAX, /* u32 */
+ ETHTOOL_A_RINGS_TX_MAX, /* u32 */
+ ETHTOOL_A_RINGS_RX, /* u32 */
+ ETHTOOL_A_RINGS_RX_MINI, /* u32 */
+ ETHTOOL_A_RINGS_RX_JUMBO, /* u32 */
+ ETHTOOL_A_RINGS_TX, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_RINGS_CNT,
+ ETHTOOL_A_RINGS_MAX = (__ETHTOOL_A_RINGS_CNT - 1)
+};
+
+/* CHANNELS */
+
+enum {
+ ETHTOOL_A_CHANNELS_UNSPEC,
+ ETHTOOL_A_CHANNELS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_CHANNELS_RX_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_TX_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_OTHER_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_COMBINED_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_RX_COUNT, /* u32 */
+ ETHTOOL_A_CHANNELS_TX_COUNT, /* u32 */
+ ETHTOOL_A_CHANNELS_OTHER_COUNT, /* u32 */
+ ETHTOOL_A_CHANNELS_COMBINED_COUNT, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_CHANNELS_CNT,
+ ETHTOOL_A_CHANNELS_MAX = (__ETHTOOL_A_CHANNELS_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
diff --git a/include/uapi/linux/if_arcnet.h b/include/uapi/linux/if_arcnet.h
index 683878036d76..b122cfac7128 100644
--- a/include/uapi/linux/if_arcnet.h
+++ b/include/uapi/linux/if_arcnet.h
@@ -60,7 +60,7 @@ struct arc_rfc1201 {
__u8 proto; /* protocol ID field - varies */
__u8 split_flag; /* for use with split packets */
__be16 sequence; /* sequence number */
- __u8 payload[0]; /* space remaining in packet (504 bytes)*/
+ __u8 payload[]; /* space remaining in packet (504 bytes)*/
};
#define RFC1201_HDR_SIZE 4
@@ -69,7 +69,7 @@ struct arc_rfc1201 {
*/
struct arc_rfc1051 {
__u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */
- __u8 payload[0]; /* 507 bytes */
+ __u8 payload[]; /* 507 bytes */
};
#define RFC1051_HDR_SIZE 1
@@ -80,7 +80,7 @@ struct arc_rfc1051 {
struct arc_eth_encap {
__u8 proto; /* Always ARC_P_ETHER */
struct ethhdr eth; /* standard ethernet header (yuck!) */
- __u8 payload[0]; /* 493 bytes */
+ __u8 payload[]; /* 493 bytes */
};
#define ETH_ENCAP_HDR_SIZE 14
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 1521073b6348..8533bf07450f 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -74,6 +74,8 @@ enum {
#define IPPROTO_UDPLITE IPPROTO_UDPLITE
IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */
#define IPPROTO_MPLS IPPROTO_MPLS
+ IPPROTO_ETHERNET = 143, /* Ethernet-within-IPv6 Encapsulation */
+#define IPPROTO_ETHERNET IPPROTO_ETHERNET
IPPROTO_RAW = 255, /* Raw IP packets */
#define IPPROTO_RAW IPPROTO_RAW
IPPROTO_MPTCP = 262, /* Multipath TCP connection */
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index a1ff345b3f33..75dffd78363a 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -64,9 +64,11 @@ struct inet_diag_req_raw {
enum {
INET_DIAG_REQ_NONE,
INET_DIAG_REQ_BYTECODE,
+ INET_DIAG_REQ_SK_BPF_STORAGES,
+ __INET_DIAG_REQ_MAX,
};
-#define INET_DIAG_REQ_MAX INET_DIAG_REQ_BYTECODE
+#define INET_DIAG_REQ_MAX (__INET_DIAG_REQ_MAX - 1)
/* Bytecode is sequence of 4 byte commands followed by variable arguments.
* All the commands identified by "code" are conditional jumps forward:
@@ -154,6 +156,7 @@ enum {
INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
INET_DIAG_MD5SIG,
INET_DIAG_ULP_INFO,
+ INET_DIAG_SK_BPF_STORAGES,
__INET_DIAG_MAX,
};
diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h
index 66048cc5d7b3..67e31f329190 100644
--- a/include/uapi/linux/net_dropmon.h
+++ b/include/uapi/linux/net_dropmon.h
@@ -29,12 +29,12 @@ struct net_dm_config_entry {
struct net_dm_config_msg {
__u32 entries;
- struct net_dm_config_entry options[0];
+ struct net_dm_config_entry options[];
};
struct net_dm_alert_msg {
__u32 entries;
- struct net_dm_drop_point points[0];
+ struct net_dm_drop_point points[];
};
struct net_dm_user_msg {
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 449a63971451..81cc1a869588 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -17,6 +17,7 @@ enum {
TCA_ACT_PAD,
TCA_ACT_COOKIE,
TCA_ACT_FLAGS,
+ TCA_ACT_HW_STATS_TYPE,
__TCA_ACT_MAX
};
@@ -24,6 +25,27 @@ enum {
* actions stats.
*/
+/* tca HW stats type
+ * When the user does not pass the attribute, they do not care:
+ * it is the same as if they had passed the attribute with
+ * all supported bits set.
+ * If no bits are set, the user is not interested in any HW statistics.
+ */
+#define TCA_ACT_HW_STATS_TYPE_IMMEDIATE (1 << 0) /* Means that in a dump, the user
+ * gets the current HW stats
+ * state from the device
+ * queried at the dump time.
+ */
+#define TCA_ACT_HW_STATS_TYPE_DELAYED (1 << 1) /* Means that in a dump, the user gets
+ * HW stats that might be out
+ * of date for some time, maybe a
+ * couple of seconds. This is
+ * the case when the driver polls
+ * stats updates periodically,
+ * or when it gets asynchronous
+ * stats updates from the device.
+ */
+
#define TCA_ACT_MAX __TCA_ACT_MAX
#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
#define TCA_ACT_MAX_PRIO 32
diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h
index e5925009a652..5f74a5f6091d 100644
--- a/include/uapi/linux/sock_diag.h
+++ b/include/uapi/linux/sock_diag.h
@@ -36,4 +36,30 @@ enum sknetlink_groups {
};
#define SKNLGRP_MAX (__SKNLGRP_MAX - 1)
+enum {
+ SK_DIAG_BPF_STORAGE_REQ_NONE,
+ SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
+ __SK_DIAG_BPF_STORAGE_REQ_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_REQ_MAX (__SK_DIAG_BPF_STORAGE_REQ_MAX - 1)
+
+enum {
+ SK_DIAG_BPF_STORAGE_REP_NONE,
+ SK_DIAG_BPF_STORAGE,
+ __SK_DIAG_BPF_STORAGE_REP_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_REP_MAX (__SK_DIAG_BPF_STORAGE_REP_MAX - 1)
+
+enum {
+ SK_DIAG_BPF_STORAGE_NONE,
+ SK_DIAG_BPF_STORAGE_PAD,
+ SK_DIAG_BPF_STORAGE_MAP_ID,
+ SK_DIAG_BPF_STORAGE_MAP_VALUE,
+ __SK_DIAG_BPF_STORAGE_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_MAX (__SK_DIAG_BPF_STORAGE_MAX - 1)
+
#endif /* _UAPI__SOCK_DIAG_H__ */
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 1a7fc856e237..f2acb2566333 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -312,6 +312,7 @@ enum {
TCP_NLA_REORD_SEEN, /* reordering events seen */
TCP_NLA_SRTT, /* smoothed RTT in usecs */
TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */
+ TCP_NLA_BYTES_NOTSENT, /* Bytes in write queue not yet sent */
};
/* for TCP_MD5SIG socket option */
diff --git a/include/xen/interface/io/tpmif.h b/include/xen/interface/io/tpmif.h
index 28e7dcd75e82..f8aa8bac5196 100644
--- a/include/xen/interface/io/tpmif.h
+++ b/include/xen/interface/io/tpmif.h
@@ -46,7 +46,7 @@ struct vtpm_shared_page {
uint8_t pad;
uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */
- uint32_t extra_pages[0]; /* grant IDs; length in nr_extra_pages */
+ uint32_t extra_pages[]; /* grant IDs; length in nr_extra_pages */
};
#endif
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 89a889585ba0..850a43bd69d3 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -42,6 +42,7 @@
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/semaphore.h>
#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/xenbus.h>
@@ -76,7 +77,7 @@ struct xenbus_device {
enum xenbus_state state;
struct completion down;
struct work_struct work;
- spinlock_t reclaim_lock;
+ struct semaphore reclaim_sem;
};
static inline struct xenbus_device *to_xenbus_device(struct device *dev)
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 042f95534f86..c498f0fffb40 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -23,7 +23,7 @@ enum bpf_struct_ops_state {
struct bpf_struct_ops_value {
BPF_STRUCT_OPS_COMMON_VALUE;
- char data[0] ____cacheline_aligned_in_smp;
+ char data[] ____cacheline_aligned_in_smp;
};
struct bpf_struct_ops_map {
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index a1468e3f5af2..d541c8486c95 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -27,9 +27,62 @@
.map_delete_batch = \
generic_map_delete_batch
+/*
+ * The bucket lock has two protection scopes:
+ *
+ * 1) Serializing concurrent operations from BPF programs on different
+ * CPUs
+ *
+ * 2) Serializing concurrent operations from BPF programs and sys_bpf()
+ *
+ * BPF programs can execute in any context including perf, kprobes and
+ * tracing. As there are almost no limits where perf, kprobes and tracing
+ * can be invoked from, the lock operations need to be protected against
+ * deadlocks. Deadlocks can be caused by recursion and by an invocation in
+ * the lock held section when functions which acquire this lock are invoked
+ * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
+ * variable bpf_prog_active, which prevents BPF programs attached to perf
+ * events, kprobes and tracing from being invoked before the prior invocation
+ * from one of these contexts completed. sys_bpf() uses the same mechanism
+ * by pinning the task to the current CPU and incrementing the recursion
+ * protection across the map operation.
+ *
+ * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
+ * operations like memory allocations (even with GFP_ATOMIC) from atomic
+ * contexts. This is required because even with GFP_ATOMIC the memory
+ * allocator calls into code pathes which acquire locks with long held lock
+ * sections. To ensure the deterministic behaviour these locks are regular
+ * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
+ * true atomic contexts on an RT kernel are the low level hardware
+ * handling, scheduling, low level interrupt handling, NMIs etc. None of
+ * these contexts should ever do memory allocations.
+ *
+ * As regular device interrupt handlers and soft interrupts are forced into
+ * thread context, the existing code which does
+ * spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
+ * just works.
+ *
+ * In theory the BPF locks could be converted to regular spinlocks as well,
+ * but the bucket locks and percpu_freelist locks can be taken from
+ * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
+ * atomic contexts even on RT. These mechanisms require preallocated maps,
+ * so there is no need to invoke memory allocations within the lock held
+ * sections.
+ *
+ * BPF maps which need dynamic allocation are only used from (forced)
+ * thread context on RT and can therefore use regular spinlocks which in
+ * turn allows to invoke memory allocations from the lock held section.
+ *
+ * On a non RT kernel this distinction is neither possible nor required.
+ * spinlock maps to raw_spinlock and the extra code is optimized out by the
+ * compiler.
+ */
struct bucket {
struct hlist_nulls_head head;
- raw_spinlock_t lock;
+ union {
+ raw_spinlock_t raw_lock;
+ spinlock_t lock;
+ };
};
struct bpf_htab {
@@ -65,9 +118,54 @@ struct htab_elem {
struct bpf_lru_node lru_node;
};
u32 hash;
- char key[0] __aligned(8);
+ char key[] __aligned(8);
};
+static inline bool htab_is_prealloc(const struct bpf_htab *htab)
+{
+ return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
+}
+
+static inline bool htab_use_raw_lock(const struct bpf_htab *htab)
+{
+ return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab));
+}
+
+static void htab_init_buckets(struct bpf_htab *htab)
+{
+ unsigned i;
+
+ for (i = 0; i < htab->n_buckets; i++) {
+ INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
+ if (htab_use_raw_lock(htab))
+ raw_spin_lock_init(&htab->buckets[i].raw_lock);
+ else
+ spin_lock_init(&htab->buckets[i].lock);
+ }
+}
+
+static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
+ struct bucket *b)
+{
+ unsigned long flags;
+
+ if (htab_use_raw_lock(htab))
+ raw_spin_lock_irqsave(&b->raw_lock, flags);
+ else
+ spin_lock_irqsave(&b->lock, flags);
+ return flags;
+}
+
+static inline void htab_unlock_bucket(const struct bpf_htab *htab,
+ struct bucket *b,
+ unsigned long flags)
+{
+ if (htab_use_raw_lock(htab))
+ raw_spin_unlock_irqrestore(&b->raw_lock, flags);
+ else
+ spin_unlock_irqrestore(&b->lock, flags);
+}
+
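[annotation] With these wrappers, every bucket operation stays agnostic of the lock flavour: preallocated maps (and all maps on !RT) take the raw spinlock, while run-time allocated maps on RT take the sleepable variant. A minimal sketch of the caller pattern the later hunks in this file converge on (the function name is illustrative, not part of the patch):

    static int example_bucket_op(struct bpf_htab *htab, struct bucket *b)
    {
            unsigned long flags;

            flags = htab_lock_bucket(htab, b);
            /* ... look up or modify entries on b->head under the lock ... */
            htab_unlock_bucket(htab, b, flags);
            return 0;
    }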
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
static bool htab_is_lru(const struct bpf_htab *htab)
@@ -82,11 +180,6 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
-static bool htab_is_prealloc(const struct bpf_htab *htab)
-{
- return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
-}
-
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
void __percpu *pptr)
{
@@ -328,8 +421,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
struct bpf_htab *htab;
- int err, i;
u64 cost;
+ int err;
htab = kzalloc(sizeof(*htab), GFP_USER);
if (!htab)
@@ -391,10 +484,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
else
htab->hashrnd = get_random_int();
- for (i = 0; i < htab->n_buckets; i++) {
- INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
- raw_spin_lock_init(&htab->buckets[i].lock);
- }
+ htab_init_buckets(htab);
if (prealloc) {
err = prealloc_init(htab);
@@ -602,7 +692,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
b = __select_bucket(htab, tgt_l->hash);
head = &b->head;
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l == tgt_l) {
@@ -610,7 +700,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
break;
}
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
return l == tgt_l;
}
@@ -686,15 +776,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
struct htab_elem *l = container_of(head, struct htab_elem, rcu);
struct bpf_htab *htab = l->htab;
- /* must increment bpf_prog_active to avoid kprobe+bpf triggering while
- * we're calling kfree, otherwise deadlock is possible if kprobes
- * are placed somewhere inside of slub
- */
- preempt_disable();
- __this_cpu_inc(bpf_prog_active);
htab_elem_free(htab, l);
- __this_cpu_dec(bpf_prog_active);
- preempt_enable();
}
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
@@ -884,8 +966,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
*/
}
- /* bpf_map_update_elem() can be called in_irq() */
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -926,7 +1007,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
}
ret = 0;
err:
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
return ret;
}
@@ -964,8 +1045,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
return -ENOMEM;
memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
- /* bpf_map_update_elem() can be called in_irq() */
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -984,7 +1064,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
ret = 0;
err:
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
if (ret)
bpf_lru_push_free(&htab->lru, &l_new->lru_node);
@@ -1019,8 +1099,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
b = __select_bucket(htab, hash);
head = &b->head;
- /* bpf_map_update_elem() can be called in_irq() */
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1043,7 +1122,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
}
ret = 0;
err:
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
return ret;
}
@@ -1083,8 +1162,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
return -ENOMEM;
}
- /* bpf_map_update_elem() can be called in_irq() */
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1106,7 +1184,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
}
ret = 0;
err:
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
if (l_new)
bpf_lru_push_free(&htab->lru, &l_new->lru_node);
return ret;
@@ -1144,7 +1222,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
b = __select_bucket(htab, hash);
head = &b->head;
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l = lookup_elem_raw(head, hash, key, key_size);
@@ -1154,7 +1232,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
ret = 0;
}
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
return ret;
}
@@ -1176,7 +1254,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
b = __select_bucket(htab, hash);
head = &b->head;
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l = lookup_elem_raw(head, hash, key, key_size);
@@ -1185,7 +1263,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
ret = 0;
}
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
if (l)
bpf_lru_push_free(&htab->lru, &l->lru_node);
return ret;
@@ -1325,8 +1403,7 @@ alloc:
}
again:
- preempt_disable();
- this_cpu_inc(bpf_prog_active);
+ bpf_disable_instrumentation();
rcu_read_lock();
again_nocopy:
dst_key = keys;
@@ -1335,7 +1412,7 @@ again_nocopy:
head = &b->head;
/* do not grab the lock unless need it (bucket_cnt > 0). */
if (locked)
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
bucket_cnt = 0;
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
@@ -1352,10 +1429,9 @@ again_nocopy:
/* Note that since bucket_cnt > 0 here, it is implicit
* that the lock was grabbed, so release it.
*/
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
rcu_read_unlock();
- this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
goto after_loop;
}
@@ -1364,10 +1440,9 @@ again_nocopy:
/* Note that since bucket_cnt > 0 here, it is implicit
* that the lock was grabbed, so release it.
*/
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
rcu_read_unlock();
- this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
kvfree(keys);
kvfree(values);
goto alloc;
@@ -1418,7 +1493,7 @@ again_nocopy:
dst_val += value_size;
}
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
locked = false;
while (node_to_free) {
@@ -1437,8 +1512,7 @@ next_batch:
}
rcu_read_unlock();
- this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
key_size * bucket_cnt) ||
copy_to_user(uvalues + total * value_size, values,
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 56e6c75d354d..65c236cf341e 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -25,7 +25,7 @@ struct lpm_trie_node {
struct lpm_trie_node __rcu *child[2];
u32 prefixlen;
u32 flags;
- u8 data[0];
+ u8 data[];
};
struct lpm_trie {
@@ -34,7 +34,7 @@ struct lpm_trie {
size_t n_entries;
size_t max_prefixlen;
size_t data_size;
- raw_spinlock_t lock;
+ spinlock_t lock;
};
/* This trie implements a longest prefix match algorithm that can be used to
@@ -315,7 +315,7 @@ static int trie_update_elem(struct bpf_map *map,
if (key->prefixlen > trie->max_prefixlen)
return -EINVAL;
- raw_spin_lock_irqsave(&trie->lock, irq_flags);
+ spin_lock_irqsave(&trie->lock, irq_flags);
/* Allocate and fill a new node */
@@ -422,7 +422,7 @@ out:
kfree(im_node);
}
- raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+ spin_unlock_irqrestore(&trie->lock, irq_flags);
return ret;
}
@@ -442,7 +442,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
if (key->prefixlen > trie->max_prefixlen)
return -EINVAL;
- raw_spin_lock_irqsave(&trie->lock, irq_flags);
+ spin_lock_irqsave(&trie->lock, irq_flags);
/* Walk the tree looking for an exact key/length match and keeping
* track of the path we traverse. We will need to know the node
@@ -518,7 +518,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
kfree_rcu(node, rcu);
out:
- raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+ spin_unlock_irqrestore(&trie->lock, irq_flags);
return ret;
}
@@ -575,7 +575,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
if (ret)
goto out_err;
- raw_spin_lock_init(&trie->lock);
+ spin_lock_init(&trie->lock);
return &trie->map;
out_err:
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 6e090140b924..b367430e611c 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -25,12 +25,18 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
free_percpu(s->freelist);
}
+static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
+ struct pcpu_freelist_node *node)
+{
+ node->next = head->first;
+ head->first = node;
+}
+
static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
struct pcpu_freelist_node *node)
{
raw_spin_lock(&head->lock);
- node->next = head->first;
- head->first = node;
+ pcpu_freelist_push_node(head, node);
raw_spin_unlock(&head->lock);
}
@@ -56,21 +62,16 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
u32 nr_elems)
{
struct pcpu_freelist_head *head;
- unsigned long flags;
int i, cpu, pcpu_entries;
pcpu_entries = nr_elems / num_possible_cpus() + 1;
i = 0;
- /* disable irq to workaround lockdep false positive
- * in bpf usage pcpu_freelist_populate() will never race
- * with pcpu_freelist_push()
- */
- local_irq_save(flags);
for_each_possible_cpu(cpu) {
again:
head = per_cpu_ptr(s->freelist, cpu);
- ___pcpu_freelist_push(head, buf);
+ /* No locking required as this is not visible yet. */
+ pcpu_freelist_push_node(head, buf);
i++;
buf += elem_size;
if (i == nr_elems)
@@ -78,7 +79,6 @@ again:
if (i % pcpu_entries)
goto again;
}
- local_irq_restore(flags);
}
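[annotation] The split makes the publication ordering explicit: pcpu_freelist_populate() runs while the map is still private to the creating task, so the lockless pcpu_freelist_push_node() suffices; once the map is visible, all producers go through the locked variant. A sketch of that lifecycle (the comments are illustrative):

    /* Map creation: the freelist is not yet reachable by other CPUs. */
    pcpu_freelist_populate(s, buf, elem_size, nr_elems);

    /* After the map fd is exposed: concurrent pushes take head->lock. */
    ___pcpu_freelist_push(head, node);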
struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 3f958b90d914..db76339fe358 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -40,6 +40,9 @@ static void do_up_read(struct irq_work *entry)
{
struct stack_map_irq_work *work;
+ if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
+ return;
+
work = container_of(entry, struct stack_map_irq_work, irq_work);
up_read_non_owner(work->sem);
work->sem = NULL;
@@ -288,10 +291,19 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
struct stack_map_irq_work *work = NULL;
if (irqs_disabled()) {
- work = this_cpu_ptr(&up_read_work);
- if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
- /* cannot queue more up_read, fallback */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ work = this_cpu_ptr(&up_read_work);
+ if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
+ /* cannot queue more up_read, fallback */
+ irq_work_busy = true;
+ }
+ } else {
+ /*
+ * PREEMPT_RT does not allow trylocking mmap_sem in
+ * interrupt-disabled context. Force the fallback code.
+ */
irq_work_busy = true;
+ }
}
/*
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a91ad518c050..c536c65256ad 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -171,11 +171,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
flags);
}
- /* must increment bpf_prog_active to avoid kprobe+bpf triggering from
- * inside bpf map update or delete otherwise deadlocks are possible
- */
- preempt_disable();
- __this_cpu_inc(bpf_prog_active);
+ bpf_disable_instrumentation();
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_update(map, key, value, flags);
@@ -206,8 +202,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
err = map->ops->map_update_elem(map, key, value, flags);
rcu_read_unlock();
}
- __this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
maybe_wait_bpf_programs(map);
return err;
@@ -222,8 +217,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
if (bpf_map_is_dev_bound(map))
return bpf_map_offload_lookup_elem(map, key, value);
- preempt_disable();
- this_cpu_inc(bpf_prog_active);
+ bpf_disable_instrumentation();
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_copy(map, key, value);
@@ -268,8 +262,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
rcu_read_unlock();
}
- this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
maybe_wait_bpf_programs(map);
return err;
@@ -909,6 +902,21 @@ void bpf_map_inc_with_uref(struct bpf_map *map)
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
+struct bpf_map *bpf_map_get(u32 ufd)
+{
+ struct fd f = fdget(ufd);
+ struct bpf_map *map;
+
+ map = __bpf_map_get(f);
+ if (IS_ERR(map))
+ return map;
+
+ bpf_map_inc(map);
+ fdput(f);
+
+ return map;
+}
+
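[annotation] bpf_map_get() mirrors bpf_map_get_with_uref() below but takes only a regular reference, which is what in-kernel consumers such as the sk_storage diag code added later in this patch need. The expected caller pattern, sketched:

    struct bpf_map *map = bpf_map_get(ufd);

    if (IS_ERR(map))
            return PTR_ERR(map);
    /* ... use map ... */
    bpf_map_put(map);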
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
struct fd f = fdget(ufd);
@@ -1136,13 +1144,11 @@ static int map_delete_elem(union bpf_attr *attr)
goto out;
}
- preempt_disable();
- __this_cpu_inc(bpf_prog_active);
+ bpf_disable_instrumentation();
rcu_read_lock();
err = map->ops->map_delete_elem(map, key);
rcu_read_unlock();
- __this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
maybe_wait_bpf_programs(map);
out:
kfree(key);
@@ -1254,13 +1260,11 @@ int generic_map_delete_batch(struct bpf_map *map,
break;
}
- preempt_disable();
- __this_cpu_inc(bpf_prog_active);
+ bpf_disable_instrumentation();
rcu_read_lock();
err = map->ops->map_delete_elem(map, key);
rcu_read_unlock();
- __this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
maybe_wait_bpf_programs(map);
if (err)
break;
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 6b264a92064b..704fa787fec0 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -367,8 +367,9 @@ out:
mutex_unlock(&trampoline_mutex);
}
-/* The logic is similar to BPF_PROG_RUN, but with explicit rcu and preempt that
- * are needed for trampoline. The macro is split into
+/* The logic is similar to BPF_PROG_RUN, but with an explicit
+ * rcu_read_lock() and migrate_disable() which are required
+ * for the trampoline. The macro is split into
* call _bpf_prog_enter
* call prog->bpf_func
* call __bpf_prog_exit
@@ -378,7 +379,7 @@ u64 notrace __bpf_prog_enter(void)
u64 start = 0;
rcu_read_lock();
- preempt_disable();
+ migrate_disable();
if (static_branch_unlikely(&bpf_stats_enabled_key))
start = sched_clock();
return start;
@@ -401,7 +402,7 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
stats->nsecs += sched_clock() - start;
u64_stats_update_end(&stats->syncp);
}
- preempt_enable();
+ migrate_enable();
rcu_read_unlock();
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 6d15dfbd4b88..289383edfc8c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8143,26 +8143,48 @@ static bool is_tracing_prog_type(enum bpf_prog_type type)
}
}
+static bool is_preallocated_map(struct bpf_map *map)
+{
+ if (!check_map_prealloc(map))
+ return false;
+ if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
+ return false;
+ return true;
+}
+
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map,
struct bpf_prog *prog)
{
- /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
- * preallocated hash maps, since doing memory allocation
- * in overflow_handler can crash depending on where nmi got
- * triggered.
+ /*
+ * Validate that trace type programs use preallocated hash maps.
+ *
+ * For programs attached to PERF events this is mandatory as the
+ * perf NMI can hit any arbitrary code sequence.
+ *
+ * All other trace types using run-time allocated hash maps are
+ * unsafe as well because tracepoints or kprobes can be inside locked
+ * regions of the memory allocator or at a place where a recursion
+ * into the memory allocator would see inconsistent state.
+ *
+ * On RT enabled kernels, run-time allocation of all trace type
+ * programs is strictly prohibited due to lock type constraints. On
+ * !RT kernels it is allowed for backwards compatibility reasons for
+ * now, but warnings are emitted so developers are made aware of
+ * the unsafety and can fix their programs before this is enforced.
*/
- if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
- if (!check_map_prealloc(map)) {
+ if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) {
+ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
verbose(env, "perf_event programs can only use preallocated hash map\n");
return -EINVAL;
}
- if (map->inner_map_meta &&
- !check_map_prealloc(map->inner_map_meta)) {
- verbose(env, "perf_event programs can only use preallocated inner hash map\n");
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ verbose(env, "trace type programs can only use preallocated hash map\n");
return -EINVAL;
}
+ WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
+ verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
}
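[annotation] Concretely, a tracing program using a hash map created with BPF_F_NO_PREALLOC now loads with a one-time warning on !RT kernels and is rejected outright on PREEMPT_RT. A minimal libbpf-style map definition that would hit this path (the name and sizes are arbitrary):

    struct bpf_map_def SEC("maps") unsafe_map = {
            .type        = BPF_MAP_TYPE_HASH,
            .key_size    = sizeof(__u32),
            .value_size  = sizeof(__u64),
            .max_entries = 128,
            .map_flags   = BPF_F_NO_PREALLOC, /* run-time allocated */
    };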
if ((is_tracing_prog_type(prog->type) ||
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index be1a1c83cdd1..f2d7cea86ffe 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -471,6 +471,7 @@ static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
*/
p++;
if (p >= end) {
+ (*pos)++;
return NULL;
} else {
*pos = *p;
@@ -782,7 +783,7 @@ void cgroup1_release_agent(struct work_struct *work)
pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
- if (!pathbuf || !agentbuf)
+ if (!pathbuf || !agentbuf || !strlen(agentbuf))
goto out;
spin_lock_irq(&css_set_lock);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 75f687301bbf..3dead0416b91 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3542,21 +3542,21 @@ static int cpu_stat_show(struct seq_file *seq, void *v)
static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
- struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+ struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
return psi_show(seq, psi, PSI_IO);
}
static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
- struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+ struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
return psi_show(seq, psi, PSI_MEM);
}
static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
- struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+ struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
return psi_show(seq, psi, PSI_CPU);
}
@@ -4400,12 +4400,16 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
}
} while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
- if (!list_empty(&cset->tasks))
+ if (!list_empty(&cset->tasks)) {
it->task_pos = cset->tasks.next;
- else if (!list_empty(&cset->mg_tasks))
+ it->cur_tasks_head = &cset->tasks;
+ } else if (!list_empty(&cset->mg_tasks)) {
it->task_pos = cset->mg_tasks.next;
- else
+ it->cur_tasks_head = &cset->mg_tasks;
+ } else {
it->task_pos = cset->dying_tasks.next;
+ it->cur_tasks_head = &cset->dying_tasks;
+ }
it->tasks_head = &cset->tasks;
it->mg_tasks_head = &cset->mg_tasks;
@@ -4463,10 +4467,14 @@ repeat:
else
it->task_pos = it->task_pos->next;
- if (it->task_pos == it->tasks_head)
+ if (it->task_pos == it->tasks_head) {
it->task_pos = it->mg_tasks_head->next;
- if (it->task_pos == it->mg_tasks_head)
+ it->cur_tasks_head = it->mg_tasks_head;
+ }
+ if (it->task_pos == it->mg_tasks_head) {
it->task_pos = it->dying_tasks_head->next;
+ it->cur_tasks_head = it->dying_tasks_head;
+ }
if (it->task_pos == it->dying_tasks_head)
css_task_iter_advance_css_set(it);
} else {
@@ -4485,11 +4493,12 @@ repeat:
goto repeat;
/* and dying leaders w/o live member threads */
- if (!atomic_read(&task->signal->live))
+ if (it->cur_tasks_head == it->dying_tasks_head &&
+ !atomic_read(&task->signal->live))
goto repeat;
} else {
/* skip all dying ones */
- if (task->flags & PF_EXITING)
+ if (it->cur_tasks_head == it->dying_tasks_head)
goto repeat;
}
}
@@ -4595,6 +4604,9 @@ static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
struct kernfs_open_file *of = s->private;
struct css_task_iter *it = of->priv;
+ if (pos)
+ (*pos)++;
+
return css_task_iter_next(it);
}
@@ -4610,7 +4622,7 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
* from position 0, so we can simply keep iterating on !0 *pos.
*/
if (!it) {
- if (WARN_ON_ONCE((*pos)++))
+ if (WARN_ON_ONCE((*pos)))
return ERR_PTR(-EINVAL);
it = kzalloc(sizeof(*it), GFP_KERNEL);
@@ -4618,10 +4630,11 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
return ERR_PTR(-ENOMEM);
of->priv = it;
css_task_iter_start(&cgrp->self, iter_flags, it);
- } else if (!(*pos)++) {
+ } else if (!(*pos)) {
css_task_iter_end(it);
css_task_iter_start(&cgrp->self, iter_flags, it);
- }
+ } else
+ return it->cur_task;
return cgroup_procs_next(s, NULL, NULL);
}
@@ -6258,6 +6271,10 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
return;
}
+ /* Don't associate the sock with an unrelated interrupted task's cgroup. */
+ if (in_interrupt())
+ return;
+
rcu_read_lock();
while (true) {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e453589da97c..bbdfac0182f4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9206,7 +9206,6 @@ static void bpf_overflow_handler(struct perf_event *event,
int ret = 0;
ctx.regs = perf_arch_bpf_user_pt_regs(regs);
- preempt_disable();
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
goto out;
rcu_read_lock();
@@ -9214,7 +9213,6 @@ static void bpf_overflow_handler(struct perf_event *event,
rcu_read_unlock();
out:
__this_cpu_dec(bpf_prog_active);
- preempt_enable();
if (!ret)
return;
diff --git a/kernel/exit.c b/kernel/exit.c
index 2833ffb0c211..0b81b26a872a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -619,8 +619,8 @@ static void forget_original_parent(struct task_struct *father,
reaper = find_new_reaper(father, reaper);
list_for_each_entry(p, &father->children, sibling) {
for_each_thread(p, t) {
- t->real_parent = reaper;
- BUG_ON((!t->ptrace) != (t->parent == father));
+ RCU_INIT_POINTER(t->real_parent, reaper);
+ BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
if (likely(!t->ptrace))
t->parent = t->real_parent;
if (t->pdeath_signal)
diff --git a/kernel/fork.c b/kernel/fork.c
index 60a1295f4384..86425305cd4a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1508,7 +1508,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
- rcu_assign_pointer(tsk->sighand, sig);
+ RCU_INIT_POINTER(tsk->sighand, sig);
if (!sig)
return -ENOMEM;
diff --git a/kernel/pid.c b/kernel/pid.c
index 0f4ecb57214c..647b4bb457b5 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -247,6 +247,16 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
tmp = tmp->parent;
}
+ /*
+ * ENOMEM is not the most obvious choice, especially for the case
+ * where the child subreaper has already exited and the pid
+ * namespace denies the creation of any new processes. But ENOMEM
+ * is what we have exposed to userspace for a long time and it is
+ * documented behavior for pid namespaces. So we can't easily
+ * change it even if there were a better suited error code.
+ */
+ retval = -ENOMEM;
+
if (unlikely(is_child_reaper(pid))) {
if (pid_ns_prepare_proc(ns))
goto out_free;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index ddade80ad276..d82b7b88d616 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1681,7 +1681,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
* hibernation for allocations made while saving the image and for device
* drivers, in case they need to allocate memory from their hibernation
* callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
- * estimate) and reserverd_size divided by PAGE_SIZE (which is tunable through
+ * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
* /sys/power/reserved_size, respectively). To make this happen, we compute the
* total number of available page frames and allocate at least
*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3c8a379c357e..c1217bfe5e81 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8337,6 +8337,8 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
sgs->group_capacity = group->sgc->capacity;
+ sgs->group_weight = group->group_weight;
+
sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
/*
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index b6ea3dcb57bf..787041eb011b 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -268,16 +268,14 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
* All filters in the list are evaluated and the lowest BPF return
* value always takes priority (ignoring the DATA).
*/
- preempt_disable();
for (; f; f = f->prev) {
- u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
+ u32 cur_ret = bpf_prog_run_pin_on_cpu(f->prog, sd);
if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
ret = cur_ret;
*match = f;
}
}
- preempt_enable();
return ret;
}
#endif /* CONFIG_SECCOMP_FILTER */
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 0735ae8545d8..ca39dc3230cb 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -335,6 +335,7 @@ static void put_probe_ref(void)
static void blk_trace_cleanup(struct blk_trace *bt)
{
+ synchronize_rcu();
blk_trace_free(bt);
put_probe_ref();
}
@@ -629,8 +630,10 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
static int __blk_trace_startstop(struct request_queue *q, int start)
{
int ret;
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
if (bt == NULL)
return -EINVAL;
@@ -740,8 +743,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
void blk_trace_shutdown(struct request_queue *q)
{
mutex_lock(&q->blk_trace_mutex);
-
- if (q->blk_trace) {
+ if (rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex))) {
__blk_trace_startstop(q, 0);
__blk_trace_remove(q);
}
@@ -752,8 +755,10 @@ void blk_trace_shutdown(struct request_queue *q)
#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ /* We don't use the 'bt' value here except as an optimization... */
+ bt = rcu_dereference_protected(q->blk_trace, 1);
if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
return 0;
@@ -796,10 +801,14 @@ blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
static void blk_add_trace_rq(struct request *rq, int error,
unsigned int nr_bytes, u32 what, u64 cgid)
{
- struct blk_trace *bt = rq->q->blk_trace;
+ struct blk_trace *bt;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(rq->q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
if (blk_rq_is_passthrough(rq))
what |= BLK_TC_ACT(BLK_TC_PC);
@@ -808,6 +817,7 @@ static void blk_add_trace_rq(struct request *rq, int error,
__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
rq->cmd_flags, what, error, 0, NULL, cgid);
+ rcu_read_unlock();
}
static void blk_add_trace_rq_insert(void *ignore,
@@ -853,14 +863,19 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
u32 what, int error)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
bio_op(bio), bio->bi_opf, what, error, 0, NULL,
blk_trace_bio_get_cgid(q, bio));
+ rcu_read_unlock();
}
static void blk_add_trace_bio_bounce(void *ignore,
@@ -905,11 +920,14 @@ static void blk_add_trace_getrq(void *ignore,
if (bio)
blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
else {
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt)
__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
NULL, 0);
+ rcu_read_unlock();
}
}
@@ -921,27 +939,35 @@ static void blk_add_trace_sleeprq(void *ignore,
if (bio)
blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
else {
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt)
__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
0, 0, NULL, 0);
+ rcu_read_unlock();
}
}
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt)
__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
+ rcu_read_unlock();
}
static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
unsigned int depth, bool explicit)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt) {
__be64 rpdu = cpu_to_be64(depth);
u32 what;
@@ -953,14 +979,17 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
}
+ rcu_read_unlock();
}
static void blk_add_trace_split(void *ignore,
struct request_queue *q, struct bio *bio,
unsigned int pdu)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt) {
__be64 rpdu = cpu_to_be64(pdu);
@@ -969,6 +998,7 @@ static void blk_add_trace_split(void *ignore,
BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
&rpdu, blk_trace_bio_get_cgid(q, bio));
}
+ rcu_read_unlock();
}
/**
@@ -988,11 +1018,15 @@ static void blk_add_trace_bio_remap(void *ignore,
struct request_queue *q, struct bio *bio,
dev_t dev, sector_t from)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
struct blk_io_trace_remap r;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
r.device_from = cpu_to_be32(dev);
r.device_to = cpu_to_be32(bio_dev(bio));
@@ -1001,6 +1035,7 @@ static void blk_add_trace_bio_remap(void *ignore,
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
+ rcu_read_unlock();
}
/**
@@ -1021,11 +1056,15 @@ static void blk_add_trace_rq_remap(void *ignore,
struct request *rq, dev_t dev,
sector_t from)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
struct blk_io_trace_remap r;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
r.device_from = cpu_to_be32(dev);
r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
@@ -1034,6 +1073,7 @@ static void blk_add_trace_rq_remap(void *ignore,
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
+ rcu_read_unlock();
}
/**
@@ -1051,14 +1091,19 @@ void blk_add_driver_data(struct request_queue *q,
struct request *rq,
void *data, size_t len)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
BLK_TA_DRV_DATA, 0, len, data,
blk_trace_request_get_cgid(q, rq));
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
@@ -1597,6 +1642,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
return -EINVAL;
put_probe_ref();
+ synchronize_rcu();
blk_trace_free(bt);
return 0;
}
@@ -1758,6 +1804,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
struct hd_struct *p = dev_to_part(dev);
struct request_queue *q;
struct block_device *bdev;
+ struct blk_trace *bt;
ssize_t ret = -ENXIO;
bdev = bdget(part_devt(p));
@@ -1770,21 +1817,23 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
mutex_lock(&q->blk_trace_mutex);
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
if (attr == &dev_attr_enable) {
- ret = sprintf(buf, "%u\n", !!q->blk_trace);
+ ret = sprintf(buf, "%u\n", !!bt);
goto out_unlock_bdev;
}
- if (q->blk_trace == NULL)
+ if (bt == NULL)
ret = sprintf(buf, "disabled\n");
else if (attr == &dev_attr_act_mask)
- ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
+ ret = blk_trace_mask2str(buf, bt->act_mask);
else if (attr == &dev_attr_pid)
- ret = sprintf(buf, "%u\n", q->blk_trace->pid);
+ ret = sprintf(buf, "%u\n", bt->pid);
else if (attr == &dev_attr_start_lba)
- ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
+ ret = sprintf(buf, "%llu\n", bt->start_lba);
else if (attr == &dev_attr_end_lba)
- ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
+ ret = sprintf(buf, "%llu\n", bt->end_lba);
out_unlock_bdev:
mutex_unlock(&q->blk_trace_mutex);
@@ -1801,6 +1850,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
struct block_device *bdev;
struct request_queue *q;
struct hd_struct *p;
+ struct blk_trace *bt;
u64 value;
ssize_t ret = -EINVAL;
@@ -1831,8 +1881,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
mutex_lock(&q->blk_trace_mutex);
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
if (attr == &dev_attr_enable) {
- if (!!value == !!q->blk_trace) {
+ if (!!value == !!bt) {
ret = 0;
goto out_unlock_bdev;
}
@@ -1844,18 +1896,21 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
}
ret = 0;
- if (q->blk_trace == NULL)
+ if (bt == NULL) {
ret = blk_trace_setup_queue(q, bdev);
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
+ }
if (ret == 0) {
if (attr == &dev_attr_act_mask)
- q->blk_trace->act_mask = value;
+ bt->act_mask = value;
else if (attr == &dev_attr_pid)
- q->blk_trace->pid = value;
+ bt->pid = value;
else if (attr == &dev_attr_start_lba)
- q->blk_trace->start_lba = value;
+ bt->start_lba = value;
else if (attr == &dev_attr_end_lba)
- q->blk_trace->end_lba = value;
+ bt->end_lba = value;
}
out_unlock_bdev:
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index b8661bd0d028..07764c761073 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -83,7 +83,7 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
if (in_nmi()) /* not supported yet */
return 1;
- preempt_disable();
+ cant_sleep();
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
/*
@@ -115,11 +115,9 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
out:
__this_cpu_dec(bpf_prog_active);
- preempt_enable();
return ret;
}
-EXPORT_SYMBOL_GPL(trace_call_bpf);
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
@@ -1516,10 +1514,9 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
+ cant_sleep();
rcu_read_lock();
- preempt_disable();
(void) BPF_PROG_RUN(prog, args);
- preempt_enable();
rcu_read_unlock();
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3f7ee102868a..fd81c7de77a7 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1547,6 +1547,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
rec = bsearch(&key, pg->records, pg->index,
sizeof(struct dyn_ftrace),
ftrace_cmp_recs);
+ if (rec)
+ break;
}
return rec;
}
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 18d16f3ef980..2a8e8e9c1c75 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1333,8 +1333,15 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
int size, esize;
int rctx;
- if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
- return;
+ if (bpf_prog_array_valid(call)) {
+ u32 ret;
+
+ preempt_disable();
+ ret = trace_call_bpf(call, regs);
+ preempt_enable();
+ if (!ret)
+ return;
+ }
esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 301db4406bc3..4e01c448b4b4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1411,14 +1411,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
return;
rcu_read_lock();
retry:
- if (req_cpu == WORK_CPU_UNBOUND)
- cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-
/* pwq which will be used unless @work is executing elsewhere */
- if (!(wq->flags & WQ_UNBOUND))
- pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
- else
+ if (wq->flags & WQ_UNBOUND) {
+ if (req_cpu == WORK_CPU_UNBOUND)
+ cpu = wq_select_unbound_cpu(raw_smp_processor_id());
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+ } else {
+ if (req_cpu == WORK_CPU_UNBOUND)
+ cpu = raw_smp_processor_id();
+ pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+ }
/*
* If @work was previously on a different pool, it might still be
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index cecb230833be..a5fddf9ebcb7 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6660,14 +6660,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
u64 start, finish;
int ret = 0, i;
- preempt_disable();
+ migrate_disable();
start = ktime_get_ns();
for (i = 0; i < runs; i++)
ret = BPF_PROG_RUN(fp, data);
finish = ktime_get_ns();
- preempt_enable();
+ migrate_enable();
*duration = finish - start;
do_div(*duration, runs);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b08b199f9a11..24ad53b4dfc0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3043,8 +3043,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
return;
flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
- pmdval = *pvmw->pmd;
- pmdp_invalidate(vma, address, pvmw->pmd);
+ pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
if (pmd_dirty(pmdval))
set_page_dirty(page);
entry = make_migration_entry(page, pmd_write(pmdval));
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d09776cd6e10..2058b8da18db 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6682,19 +6682,9 @@ void mem_cgroup_sk_alloc(struct sock *sk)
if (!mem_cgroup_sockets_enabled)
return;
- /*
- * Socket cloning can throw us here with sk_memcg already
- * filled. It won't however, necessarily happen from
- * process context. So the test for root memcg given
- * the current task's memcg won't help us in this case.
- *
- * Respecting the original socket's memcg is a better
- * decision in this case.
- */
- if (sk->sk_memcg) {
- css_get(&sk->sk_memcg->css);
+ /* Do not associate the sock with an unrelated interrupted task's memcg. */
+ if (in_interrupt())
return;
- }
rcu_read_lock();
memcg = mem_cgroup_from_task(current);
diff --git a/mm/memory.c b/mm/memory.c
index 0bccc622e482..e8bfdf0d9d1d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2257,7 +2257,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
bool ret;
void *kaddr;
void __user *uaddr;
- bool force_mkyoung;
+ bool locked = false;
struct vm_area_struct *vma = vmf->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = vmf->address;
@@ -2282,11 +2282,11 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
* On architectures with software "accessed" bits, we would
* take a double page fault, so mark it accessed here.
*/
- force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
- if (force_mkyoung) {
+ if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
pte_t entry;
vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
+ locked = true;
if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
/*
* Other thread has already handled the fault
@@ -2310,18 +2310,37 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
* zeroes.
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
+ if (locked)
+ goto warn;
+
+ /* Re-validate under PTL if the page is still mapped */
+ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
+ locked = true;
+ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+ /* The PTE changed under us. Retry page fault. */
+ ret = false;
+ goto pte_unlock;
+ }
+
/*
- * Give a warn in case there can be some obscure
- * use-case
+ * The same page can be mapped back since the last copy attempt.
+ * Try to copy again under the PTL.
*/
- WARN_ON_ONCE(1);
- clear_page(kaddr);
+ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
+ /*
+ * Give a warn in case there can be some obscure
+ * use-case
+ */
+warn:
+ WARN_ON_ONCE(1);
+ clear_page(kaddr);
+ }
}
ret = true;
pte_unlock:
- if (force_mkyoung)
+ if (locked)
pte_unmap_unlock(vmf->pte, vmf->ptl);
kunmap_atomic(kaddr);
flush_dcache_page(dst);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0a54ffac8c68..19389cdc16a5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -574,7 +574,13 @@ EXPORT_SYMBOL_GPL(restore_online_page_callback);
void generic_online_page(struct page *page, unsigned int order)
{
- kernel_map_pages(page, 1 << order, 1);
+ /*
+ * Freeing the page with debug_pagealloc enabled will try to unmap it,
+ * so we should map it first. This is better than introducing a special
+ * case in the page freeing fast path.
+ */
+ if (debug_pagealloc_enabled_static())
+ kernel_map_pages(page, 1 << order, 1);
__free_pages_core(page, order);
totalram_pages_add(1UL << order);
#ifdef CONFIG_HIGHMEM
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7a8e84f86831..311c0dadf71c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -161,6 +161,31 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
return pages;
}
+/*
+ * Used when setting automatic NUMA hinting protection where it is
+ * critical that a numa hinting PMD is not confused with a bad PMD.
+ */
+static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
+{
+ pmd_t pmdval = pmd_read_atomic(pmd);
+
+ /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ barrier();
+#endif
+
+ if (pmd_none(pmdval))
+ return 1;
+ if (pmd_trans_huge(pmdval))
+ return 0;
+ if (unlikely(pmd_bad(pmdval))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+
+ return 0;
+}
+
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
pud_t *pud, unsigned long addr, unsigned long end,
pgprot_t newprot, int dirty_accountable, int prot_numa)
@@ -178,8 +203,17 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
unsigned long this_pages;
next = pmd_addr_end(addr, end);
- if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
- && pmd_none_or_clear_bad(pmd))
+
+ /*
+ * Automatic NUMA balancing walks the tables with mmap_sem
+ * held for read. It's possible for a parallel update to occur
+ * between pmd_trans_huge() and a pmd_none_or_clear_bad()
+ * check, leading to a false positive and clearing.
+ * Hence, it's necessary to atomically read the PMD value
+ * for all the checks.
+ */
+ if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
+ pmd_none_or_clear_bad_unless_trans_huge(pmd))
goto next;
/* invoke the mmu notifier if the pmd is populated */
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 43754d8ebce8..42f31c4b53ad 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -41,7 +41,6 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/rwlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index f0209505e41a..a7c8dd7ae513 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -789,6 +789,10 @@ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
+ /* interface already disabled by batadv_iv_ogm_iface_disable */
+ if (!*ogm_buff)
+ return;
+
/* the interface gets activated here to avoid race conditions between
* the moment of activating the interface in
* hardif_activate_interface() where the originator mac is set and
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 3d21dd83f8cc..b85da4b7a77b 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -88,7 +88,7 @@ struct batadv_dhcp_packet {
__u8 sname[64];
__u8 file[128];
__be32 magic;
- __u8 options[0];
+ __u8 options[];
};
#define BATADV_DHCP_YIADDR_LEN sizeof(((struct batadv_dhcp_packet *)0)->yiaddr)
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 692306df7b6f..2a234d0ad445 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2020.0"
+#define BATADV_SOURCE_VERSION "2020.1"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 852932838ddc..a9635c882fe0 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -862,7 +862,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
u8 *tt_change_ptr;
spin_lock_bh(&orig_node->vlan_list_lock);
- hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
num_vlan++;
num_entries += atomic_read(&vlan->tt.num_entries);
}
@@ -888,7 +888,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
(*tt_data)->num_vlan = htons(num_vlan);
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
- hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
tt_vlan->vid = htons(vlan->vid);
tt_vlan->crc = htonl(vlan->tt.crc);
@@ -937,7 +937,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
int change_offset;
spin_lock_bh(&bat_priv->softif_vlan_list_lock);
- hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) {
vlan_entries = atomic_read(&vlan->tt.num_entries);
if (vlan_entries < 1)
continue;
@@ -967,7 +967,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
(*tt_data)->num_vlan = htons(num_vlan);
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
- hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) {
vlan_entries = atomic_read(&vlan->tt.num_entries);
if (vlan_entries < 1)
continue;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index d555c0d8657d..562443f94133 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -37,7 +37,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
repeat = 1;
rcu_read_lock();
- preempt_disable();
+ migrate_disable();
time_start = ktime_get_ns();
for (i = 0; i < repeat; i++) {
bpf_cgroup_storage_set(storage);
@@ -54,18 +54,18 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
if (need_resched()) {
time_spent += ktime_get_ns() - time_start;
- preempt_enable();
+ migrate_enable();
rcu_read_unlock();
cond_resched();
rcu_read_lock();
- preempt_disable();
+ migrate_disable();
time_start = ktime_get_ns();
}
}
time_spent += ktime_get_ns() - time_start;
- preempt_enable();
+ migrate_enable();
rcu_read_unlock();
do_div(time_spent, repeat);
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 03c7cdd8e4cb..195d2d67be8a 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -112,7 +112,8 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
caif_device_list(dev_net(dev));
struct caif_device_entry *caifd;
- list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
+ list_for_each_entry_rcu(caifd, &caifdevs->list, list,
+ lockdep_rtnl_is_held()) {
if (caifd->netdev == dev)
return caifd;
}
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 3ab23f698221..756b63b6f7b3 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -8,6 +8,7 @@
#include <linux/bpf.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
+#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
static atomic_t cache_idx;
@@ -60,7 +61,7 @@ struct bpf_sk_storage_data {
* the number of cachelines access during the cache hit case.
*/
struct bpf_sk_storage_map __rcu *smap;
- u8 data[0] __aligned(8);
+ u8 data[] __aligned(8);
};
/* Linked to bpf_sk_storage and bpf_sk_storage_map */
@@ -606,6 +607,14 @@ static void bpf_sk_storage_map_free(struct bpf_map *map)
kfree(map);
}
+/* U16_MAX is much more than enough for sk local storage
+ * considering a tcp_sock is ~2k.
+ */
+#define MAX_VALUE_SIZE \
+ min_t(u32, \
+ (KMALLOC_MAX_SIZE - MAX_BPF_STACK - sizeof(struct bpf_sk_storage_elem)), \
+ (U16_MAX - sizeof(struct bpf_sk_storage_elem)))
+
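[annotation] On common configurations KMALLOC_MAX_SIZE is in the megabyte range, so the U16_MAX term is the smaller of the two and the effective bound works out to (assuming such a configuration):

    MAX_VALUE_SIZE = min(KMALLOC_MAX_SIZE - MAX_BPF_STACK - sizeof(elem),
                         U16_MAX - sizeof(elem))
                   = 65535 - sizeof(struct bpf_sk_storage_elem)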
static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr)
{
if (attr->map_flags & ~SK_STORAGE_CREATE_FLAG_MASK ||
@@ -619,12 +628,7 @@ static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (attr->value_size >= KMALLOC_MAX_SIZE -
- MAX_BPF_STACK - sizeof(struct bpf_sk_storage_elem) ||
- /* U16_MAX is much more than enough for sk local storage
- * considering a tcp_sock is ~2k.
- */
- attr->value_size > U16_MAX - sizeof(struct bpf_sk_storage_elem))
+ if (attr->value_size > MAX_VALUE_SIZE)
return -E2BIG;
return 0;
@@ -910,3 +914,270 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_SOCKET,
};
+
+struct bpf_sk_storage_diag {
+ u32 nr_maps;
+ struct bpf_map *maps[];
+};
+
+/* The reply will be like:
+ * INET_DIAG_BPF_SK_STORAGES (nla_nest)
+ * SK_DIAG_BPF_STORAGE (nla_nest)
+ * SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
+ * SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
+ * SK_DIAG_BPF_STORAGE (nla_nest)
+ * SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
+ * SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
+ * ....
+ */
+static int nla_value_size(u32 value_size)
+{
+ /* SK_DIAG_BPF_STORAGE (nla_nest)
+ * SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
+ * SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
+ */
+ return nla_total_size(0) + nla_total_size(sizeof(u32)) +
+ nla_total_size_64bit(value_size);
+}
+
+void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
+{
+ u32 i;
+
+ if (!diag)
+ return;
+
+ for (i = 0; i < diag->nr_maps; i++)
+ bpf_map_put(diag->maps[i]);
+
+ kfree(diag);
+}
+EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);
+
+static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
+ const struct bpf_map *map)
+{
+ u32 i;
+
+ for (i = 0; i < diag->nr_maps; i++) {
+ if (diag->maps[i] == map)
+ return true;
+ }
+
+ return false;
+}
+
+struct bpf_sk_storage_diag *
+bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
+{
+ struct bpf_sk_storage_diag *diag;
+ struct nlattr *nla;
+ u32 nr_maps = 0;
+ int rem, err;
+
+ /* bpf_sk_storage_map is currently limited to CAP_SYS_ADMIN,
+ * matching the check done on the map_alloc_check() side.
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+
+ nla_for_each_nested(nla, nla_stgs, rem) {
+ if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
+ nr_maps++;
+ }
+
+ diag = kzalloc(sizeof(*diag) + sizeof(diag->maps[0]) * nr_maps,
+ GFP_KERNEL);
+ if (!diag)
+ return ERR_PTR(-ENOMEM);
+
+ nla_for_each_nested(nla, nla_stgs, rem) {
+ struct bpf_map *map;
+ int map_fd;
+
+ if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
+ continue;
+
+ map_fd = nla_get_u32(nla);
+ map = bpf_map_get(map_fd);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_free;
+ }
+ if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
+ bpf_map_put(map);
+ err = -EINVAL;
+ goto err_free;
+ }
+ if (diag_check_dup(diag, map)) {
+ bpf_map_put(map);
+ err = -EEXIST;
+ goto err_free;
+ }
+ diag->maps[diag->nr_maps++] = map;
+ }
+
+ return diag;
+
+err_free:
+ bpf_sk_storage_diag_free(diag);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);
+
+static int diag_get(struct bpf_sk_storage_data *sdata, struct sk_buff *skb)
+{
+ struct nlattr *nla_stg, *nla_value;
+ struct bpf_sk_storage_map *smap;
+
+ /* It cannot exceed the max nlattr payload */
+ BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < MAX_VALUE_SIZE);
+
+ nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
+ if (!nla_stg)
+ return -EMSGSIZE;
+
+ smap = rcu_dereference(sdata->smap);
+ if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
+ goto errout;
+
+ nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
+ smap->map.value_size,
+ SK_DIAG_BPF_STORAGE_PAD);
+ if (!nla_value)
+ goto errout;
+
+ if (map_value_has_spin_lock(&smap->map))
+ copy_map_value_locked(&smap->map, nla_data(nla_value),
+ sdata->data, true);
+ else
+ copy_map_value(&smap->map, nla_data(nla_value), sdata->data);
+
+ nla_nest_end(skb, nla_stg);
+ return 0;
+
+errout:
+ nla_nest_cancel(skb, nla_stg);
+ return -EMSGSIZE;
+}
+
+static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size)
+{
+ /* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
+ unsigned int diag_size = nla_total_size(0);
+ struct bpf_sk_storage *sk_storage;
+ struct bpf_sk_storage_elem *selem;
+ struct bpf_sk_storage_map *smap;
+ struct nlattr *nla_stgs;
+ unsigned int saved_len;
+ int err = 0;
+
+ rcu_read_lock();
+
+ sk_storage = rcu_dereference(sk->sk_bpf_storage);
+ if (!sk_storage || hlist_empty(&sk_storage->list)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ nla_stgs = nla_nest_start(skb, stg_array_type);
+ if (!nla_stgs)
+ /* Continue to learn diag_size */
+ err = -EMSGSIZE;
+
+ saved_len = skb->len;
+ hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
+ smap = rcu_dereference(SDATA(selem)->smap);
+ diag_size += nla_value_size(smap->map.value_size);
+
+ if (nla_stgs && diag_get(SDATA(selem), skb))
+ /* Continue to learn diag_size */
+ err = -EMSGSIZE;
+ }
+
+ rcu_read_unlock();
+
+ if (nla_stgs) {
+ if (saved_len == skb->len)
+ nla_nest_cancel(skb, nla_stgs);
+ else
+ nla_nest_end(skb, nla_stgs);
+ }
+
+ if (diag_size == nla_total_size(0)) {
+ *res_diag_size = 0;
+ return 0;
+ }
+
+ *res_diag_size = diag_size;
+ return err;
+}
+
+int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
+ struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size)
+{
+ /* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
+ unsigned int diag_size = nla_total_size(0);
+ struct bpf_sk_storage *sk_storage;
+ struct bpf_sk_storage_data *sdata;
+ struct nlattr *nla_stgs;
+ unsigned int saved_len;
+ int err = 0;
+ u32 i;
+
+ *res_diag_size = 0;
+
+ /* No map has been specified. Dump all. */
+ if (!diag->nr_maps)
+ return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
+ res_diag_size);
+
+ rcu_read_lock();
+ sk_storage = rcu_dereference(sk->sk_bpf_storage);
+ if (!sk_storage || hlist_empty(&sk_storage->list)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ nla_stgs = nla_nest_start(skb, stg_array_type);
+ if (!nla_stgs)
+ /* Continue to learn diag_size */
+ err = -EMSGSIZE;
+
+ saved_len = skb->len;
+ for (i = 0; i < diag->nr_maps; i++) {
+ sdata = __sk_storage_lookup(sk_storage,
+ (struct bpf_sk_storage_map *)diag->maps[i],
+ false);
+
+ if (!sdata)
+ continue;
+
+ diag_size += nla_value_size(diag->maps[i]->value_size);
+
+ if (nla_stgs && diag_get(sdata, skb))
+ /* Continue to learn diag_size */
+ err = -EMSGSIZE;
+ }
+ rcu_read_unlock();
+
+ if (nla_stgs) {
+ if (saved_len == skb->len)
+ nla_nest_cancel(skb, nla_stgs);
+ else
+ nla_nest_end(skb, nla_stgs);
+ }
+
+ if (diag_size == nla_total_size(0)) {
+ *res_diag_size = 0;
+ return 0;
+ }
+
+ *res_diag_size = diag_size;
+ return err;
+}
+EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);
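A note on the sizing scheme above: the nla_total_size() family accounts for the 4-byte netlink attribute header plus 4-byte alignment padding, which is what lets the dump path keep accumulating diag_size even after the skb overflows ("Continue to learn diag_size") and report the buffer size the caller should retry with. A standalone sketch of that arithmetic, assuming the usual netlink constants; value_size() here only approximates the kernel's nla_value_size():

#include <stdio.h>

#define NLA_ALIGNTO   4
#define NLA_ALIGN(x)  (((x) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN    4		/* struct nlattr is 4 bytes */

/* mirrors nla_total_size(): header plus padded payload */
static int total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

/* rough analogue of nla_value_size(): nest header, map id and value;
 * 64-bit attributes may add one extra pad attribute on some
 * architectures, ignored here for brevity
 */
static int value_size(int value_len)
{
	return total_size(0)			/* SK_DIAG_BPF_STORAGE nest */
	     + total_size(sizeof(unsigned))	/* ..._STORAGE_MAP_ID (u32) */
	     + total_size(value_len);		/* ..._STORAGE_MAP_VALUE    */
}

int main(void)
{
	/* space needed to dump one 13-byte map value, padding included */
	printf("%d\n", value_size(13));
	return 0;
}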
diff --git a/net/core/datagram.c b/net/core/datagram.c
index a78e7f864c1e..4213081c6ed3 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -166,8 +166,6 @@ done:
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff_head *queue,
unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
int *off, int *err,
struct sk_buff **last)
{
@@ -198,8 +196,6 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
refcount_inc(&skb->users);
} else {
__skb_unlink(skb, queue);
- if (destructor)
- destructor(sk, skb);
}
*off = _off;
return skb;
@@ -212,7 +208,6 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
* @sk: socket
* @queue: socket queue from which to receive
* @flags: MSG\_ flags
- * @destructor: invoked under the receive lock on successful dequeue
* @off: an offset in bytes to peek skb from. Returns an offset
* within an skb where data actually starts
* @err: error code returned
@@ -245,10 +240,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
*/
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
struct sk_buff_head *queue,
- unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
- int *off, int *err,
+ unsigned int flags, int *off, int *err,
struct sk_buff **last)
{
struct sk_buff *skb;
@@ -269,8 +261,8 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
* However, this function was correct in any case. 8)
*/
spin_lock_irqsave(&queue->lock, cpu_flags);
- skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
- off, &error, last);
+ skb = __skb_try_recv_from_queue(sk, queue, flags, off, &error,
+ last);
spin_unlock_irqrestore(&queue->lock, cpu_flags);
if (error)
goto no_packet;
@@ -293,10 +285,7 @@ EXPORT_SYMBOL(__skb_try_recv_datagram);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
- unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
- int *off, int *err)
+ unsigned int flags, int *off, int *err)
{
struct sk_buff *skb, *last;
long timeo;
@@ -304,8 +293,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk,
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
- skb = __skb_try_recv_datagram(sk, sk_queue, flags, destructor,
- off, err, &last);
+ skb = __skb_try_recv_datagram(sk, sk_queue, flags, off, err,
+ &last);
if (skb)
return skb;
@@ -326,7 +315,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
return __skb_recv_datagram(sk, &sk->sk_receive_queue,
flags | (noblock ? MSG_DONTWAIT : 0),
- NULL, &off, err);
+ &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
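With the destructor argument gone, callers of the datagram receive helpers shrink to the flags/offset/error triple. A hedged sketch of what a protocol's receive path looks like against the new signature (the helper names are from the patch; the caller below is illustrative):

/* illustrative caller, not from the patch */
static struct sk_buff *proto_recv_one(struct sock *sk, unsigned int flags,
				      int *err)
{
	int off = 0;

	/* no destructor callback is passed any more; any per-skb
	 * accounting now happens after the queue lock is dropped
	 */
	return __skb_recv_datagram(sk, &sk->sk_receive_queue, flags,
				   &off, err);
}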
diff --git a/net/core/dev.c b/net/core/dev.c
index 25dab1598803..d84541c24446 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4848,7 +4848,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
skb->tc_at_ingress = 1;
mini_qdisc_bstats_cpu_update(miniq, skb);
- switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
+ switch (tcf_classify_ingress(skb, miniq->block, miniq->filter_list,
+ &cl_res, false)) {
case TC_ACT_OK:
case TC_ACT_RECLASSIFY:
skb->tc_index = TC_H_MIN(cl_res.classid);
diff --git a/net/core/devlink.c b/net/core/devlink.c
index f8af5e2d748b..f51bebc8c33f 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -545,6 +545,7 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
case DEVLINK_PORT_FLAVOUR_CPU:
case DEVLINK_PORT_FLAVOUR_DSA:
+ case DEVLINK_PORT_FLAVOUR_VIRTUAL:
if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
attrs->phys.port_number))
return -EMSGSIZE;
@@ -3352,34 +3353,41 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
struct genl_info *info,
union devlink_param_value *value)
{
+ struct nlattr *param_data;
int len;
- if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
- !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
+ param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
+
+ if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data)
return -EINVAL;
switch (param->type) {
case DEVLINK_PARAM_TYPE_U8:
- value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+ if (nla_len(param_data) != sizeof(u8))
+ return -EINVAL;
+ value->vu8 = nla_get_u8(param_data);
break;
case DEVLINK_PARAM_TYPE_U16:
- value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+ if (nla_len(param_data) != sizeof(u16))
+ return -EINVAL;
+ value->vu16 = nla_get_u16(param_data);
break;
case DEVLINK_PARAM_TYPE_U32:
- value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+ if (nla_len(param_data) != sizeof(u32))
+ return -EINVAL;
+ value->vu32 = nla_get_u32(param_data);
break;
case DEVLINK_PARAM_TYPE_STRING:
- len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]),
- nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
- if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) ||
+ len = strnlen(nla_data(param_data), nla_len(param_data));
+ if (len == nla_len(param_data) ||
len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
return -EINVAL;
- strcpy(value->vstr,
- nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
+ strcpy(value->vstr, nla_data(param_data));
break;
case DEVLINK_PARAM_TYPE_BOOL:
- value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
- true : false;
+ if (param_data && nla_len(param_data))
+ return -EINVAL;
+ value->vbool = nla_get_flag(param_data);
break;
}
return 0;
@@ -4232,7 +4240,7 @@ struct devlink_fmsg_item {
int attrtype;
u8 nla_type;
u16 len;
- int value[0];
+ int value[];
};
struct devlink_fmsg {
@@ -6032,6 +6040,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
[DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
@@ -6806,6 +6816,7 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
switch (attrs->flavour) {
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
+ case DEVLINK_PORT_FLAVOUR_VIRTUAL:
if (!attrs->split)
n = snprintf(name, len, "p%u", attrs->phys.port_number);
else
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index d58c1c45a895..8e33cec9fc4e 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -68,7 +68,7 @@ struct net_dm_hw_entry {
struct net_dm_hw_entries {
u32 num_entries;
- struct net_dm_hw_entry entries[0];
+ struct net_dm_hw_entry entries[];
};
struct per_cpu_dm_data {
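The [0] to [] conversions here and below replace the old GNU zero-length-array idiom with a C99 flexible array member, which gives the compiler and fortification tooling accurate bounds information. The allocation pattern is unchanged; a standalone example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* same shape as the structs in the diff: header plus trailing array */
struct entries {
	unsigned int num_entries;
	int entries[];		/* C99 flexible array member, was [0] */
};

int main(void)
{
	unsigned int n = 4;
	/* one allocation covers the header and n trailing elements */
	struct entries *e = malloc(sizeof(*e) + n * sizeof(e->entries[0]));

	if (!e)
		return 1;
	e->num_entries = n;
	memset(e->entries, 0, n * sizeof(e->entries[0]));
	e->entries[3] = 42;
	printf("%d\n", e->entries[3]);
	free(e);
	return 0;
}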
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index a1670dff0629..3eff84824c8b 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -920,9 +920,7 @@ bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
(int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
flow_keys->flags = flags;
- preempt_disable();
- result = BPF_PROG_RUN(prog, ctx);
- preempt_enable();
+ result = bpf_prog_run_pin_on_cpu(prog, ctx);
flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
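This hunk, like the skmsg.c ones below, folds the open-coded preempt_disable()/BPF_PROG_RUN()/preempt_enable() triplet into bpf_prog_run_pin_on_cpu(), which keeps the program on one CPU while (notably on PREEMPT_RT) being able to use migrate-disable semantics instead of hard preemption disabling. The before/after shape, as a rough sketch rather than a literal excerpt:

/* before: the caller managed preemption around the run */
preempt_disable();
result = BPF_PROG_RUN(prog, ctx);
preempt_enable();

/* after: the helper pins execution to the current CPU itself */
result = bpf_prog_run_pin_on_cpu(prog, ctx);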
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index d21348202ba6..7440e6117c81 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -188,6 +188,13 @@ void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
+void flow_rule_match_ct(const struct flow_rule *rule,
+ struct flow_match_ct *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ct);
+
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv))
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 0642f91c4038..b4c87fe31be2 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -53,30 +53,60 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
kfree(css_cls_state(css));
}
+/*
+ * To avoid stalling socket creation for tasks with a large number of threads
+ * and open sockets, release file_lock every 1000 iterated descriptors.
+ * New sockets will already have been created with the new classid.
+ */
+
+struct update_classid_context {
+ u32 classid;
+ unsigned int batch;
+};
+
+#define UPDATE_CLASSID_BATCH 1000
+
static int update_classid_sock(const void *v, struct file *file, unsigned n)
{
int err;
+ struct update_classid_context *ctx = (void *)v;
struct socket *sock = sock_from_file(file, &err);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
- sock_cgroup_set_classid(&sock->sk->sk_cgrp_data,
- (unsigned long)v);
+ sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
spin_unlock(&cgroup_sk_update_lock);
}
+ if (--ctx->batch == 0) {
+ ctx->batch = UPDATE_CLASSID_BATCH;
+ return n + 1;
+ }
return 0;
}
+static void update_classid_task(struct task_struct *p, u32 classid)
+{
+ struct update_classid_context ctx = {
+ .classid = classid,
+ .batch = UPDATE_CLASSID_BATCH
+ };
+ unsigned int fd = 0;
+
+ do {
+ task_lock(p);
+ fd = iterate_fd(p->files, fd, update_classid_sock, &ctx);
+ task_unlock(p);
+ cond_resched();
+ } while (fd);
+}
+
static void cgrp_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct task_struct *p;
cgroup_taskset_for_each(p, css, tset) {
- task_lock(p);
- iterate_fd(p->files, 0, update_classid_sock,
- (void *)(unsigned long)css_cls_state(css)->classid);
- task_unlock(p);
+ update_classid_task(p, css_cls_state(css)->classid);
}
}
@@ -98,10 +128,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
css_task_iter_start(css, 0, &it);
while ((p = css_task_iter_next(&it))) {
- task_lock(p);
- iterate_fd(p->files, 0, update_classid_sock,
- (void *)(unsigned long)cs->classid);
- task_unlock(p);
+ update_classid_task(p, cs->classid);
cond_resched();
}
css_task_iter_end(&it);
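The batching above leans on iterate_fd()'s contract: a non-zero return from the callback stops the walk and becomes iterate_fd()'s return value, which the caller feeds back in as the descriptor to resume from. A standalone model of that resume loop, with illustrative names and a tiny batch size:

#include <stdio.h>

#define BATCH 3	/* stand-in for UPDATE_CLASSID_BATCH */

/* returns 0 to continue, or n + 1 to stop and remember the position,
 * like update_classid_sock() above
 */
static unsigned int visit(unsigned int n, unsigned int *budget)
{
	printf("fd %u\n", n);
	if (--(*budget) == 0) {
		*budget = BATCH;
		return n + 1;	/* resume cursor */
	}
	return 0;
}

int main(void)
{
	unsigned int fds = 8, budget = BATCH, fd = 0;

	do {
		unsigned int stop = 0, n;

		/* models iterate_fd(p->files, fd, visit, ...) */
		for (n = fd; n < fds && !stop; n++)
			stop = visit(n, &budget);
		fd = stop;	/* zero means the table was exhausted */
		/* the file lock would be dropped and cond_resched()
		 * called here before the next batch
		 */
	} while (fd);
	return 0;
}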
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index acc849df60b5..f2b3d8dd40f4 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2003,8 +2003,8 @@ static int pktgen_setup_dev(const struct pktgen_net *pn,
return -ENODEV;
}
- if (odev->type != ARPHRD_ETHER) {
- pr_err("not an ethernet device: \"%s\"\n", ifname);
+ if (odev->type != ARPHRD_ETHER && odev->type != ARPHRD_LOOPBACK) {
+ pr_err("not an ethernet or loopback device: \"%s\"\n", ifname);
err = -EINVAL;
} else if (!netif_running(odev)) {
pr_err("device is down: \"%s\"\n", ifname);
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index eeb28cb85664..c479372f2cd2 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -628,7 +628,6 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
struct bpf_prog *prog;
int ret;
- preempt_disable();
rcu_read_lock();
prog = READ_ONCE(psock->progs.msg_parser);
if (unlikely(!prog)) {
@@ -638,7 +637,7 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
sk_msg_compute_data_pointers(msg);
msg->sk = sk;
- ret = BPF_PROG_RUN(prog, msg);
+ ret = bpf_prog_run_pin_on_cpu(prog, msg);
ret = sk_psock_map_verd(ret, msg->sk_redir);
psock->apply_bytes = msg->apply_bytes;
if (ret == __SK_REDIRECT) {
@@ -653,7 +652,6 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
}
out:
rcu_read_unlock();
- preempt_enable();
return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
@@ -665,9 +663,7 @@ static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
skb->sk = psock->sk;
bpf_compute_data_end_sk_skb(skb);
- preempt_disable();
- ret = BPF_PROG_RUN(prog, skb);
- preempt_enable();
+ ret = bpf_prog_run_pin_on_cpu(prog, skb);
/* strparser clones the skb before handing it to an upper layer,
* meaning skb_orphan has been called. We NULL sk on the way out
* to ensure we don't trigger a BUG_ON() in skb/sk operations
diff --git a/net/core/sock.c b/net/core/sock.c
index e4af4dbc1c9e..0fc8937a7ff4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1832,7 +1832,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
atomic_set(&newsk->sk_zckey, 0);
sock_reset_flag(newsk, SOCK_DONE);
- mem_cgroup_sk_alloc(newsk);
+
+ /* sk->sk_memcg will be populated at accept() time */
+ newsk->sk_memcg = NULL;
+
cgroup_sk_alloc(&newsk->sk_cgrp_data);
rcu_read_lock();
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 70f88f2b4456..105f3734dadb 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -95,7 +95,7 @@ void ccid_cleanup_builtins(void);
struct ccid {
struct ccid_operations *ccid_ops;
- char ccid_priv[0];
+ char ccid_priv[];
};
static inline void *ccid_priv(const struct ccid *ccid)
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index 73ef73a218ff..8a82c5a2c5a8 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -46,16 +46,15 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
}
static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
- inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
+ inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r);
}
-static int dccp_diag_dump_one(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+static int dccp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
- return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
+ return inet_diag_dump_one_icsk(&dccp_hashinfo, cb, req);
}
static const struct inet_diag_handler dccp_diag_handler = {
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index a7662e7a691d..760e6ea3178a 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -117,7 +117,9 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
/* port.c */
int dsa_port_set_state(struct dsa_port *dp, u8 state,
struct switchdev_trans *trans);
+int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
+void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
diff --git a/net/dsa/port.c b/net/dsa/port.c
index d4450a454249..e6875d8f944d 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -63,7 +63,7 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}
-int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
+int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
@@ -78,14 +78,31 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
if (!dp->bridge_dev)
dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
+ if (dp->pl)
+ phylink_start(dp->pl);
+
return 0;
}
-void dsa_port_disable(struct dsa_port *dp)
+int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
+{
+ int err;
+
+ rtnl_lock();
+ err = dsa_port_enable_rt(dp, phy);
+ rtnl_unlock();
+
+ return err;
+}
+
+void dsa_port_disable_rt(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
+ if (dp->pl)
+ phylink_stop(dp->pl);
+
if (!dp->bridge_dev)
dsa_port_set_state_now(dp, BR_STATE_DISABLED);
@@ -93,6 +110,13 @@ void dsa_port_disable(struct dsa_port *dp)
ds->ops->port_disable(ds, port);
}
+void dsa_port_disable(struct dsa_port *dp)
+{
+ rtnl_lock();
+ dsa_port_disable_rt(dp);
+ rtnl_unlock();
+}
+
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
{
struct dsa_notifier_bridge_info info = {
@@ -617,10 +641,6 @@ static int dsa_port_phylink_register(struct dsa_port *dp)
goto err_phy_connect;
}
- rtnl_lock();
- phylink_start(dp->pl);
- rtnl_unlock();
-
return 0;
err_phy_connect:
@@ -631,9 +651,14 @@ err_phy_connect:
int dsa_port_link_register_of(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
+ struct device_node *phy_np;
- if (!ds->ops->adjust_link)
- return dsa_port_phylink_register(dp);
+ if (!ds->ops->adjust_link) {
+ phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
+ if (of_phy_is_fixed_link(dp->dn) || phy_np)
+ return dsa_port_phylink_register(dp);
+ return 0;
+ }
dev_warn(ds->dev,
"Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
@@ -648,11 +673,12 @@ void dsa_port_link_unregister_of(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
- if (!ds->ops->adjust_link) {
+ if (!ds->ops->adjust_link && dp->pl) {
rtnl_lock();
phylink_disconnect_phy(dp->pl);
rtnl_unlock();
phylink_destroy(dp->pl);
+ dp->pl = NULL;
return;
}
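The _rt suffix marks the variants that expect rtnl_lock to already be held; the unsuffixed functions become thin wrappers that take and drop it themselves, so callers already under RTNL (like dsa_slave_open() in the next file) use the _rt form directly. The pattern in miniature, with a pthread mutex standing in for RTNL:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold the lock, like dsa_port_enable_rt() */
static int port_enable_locked(int port)
{
	printf("enable port %d (lock held)\n", port);
	return 0;
}

/* convenience wrapper for unlocked contexts, like dsa_port_enable() */
static int port_enable(int port)
{
	int err;

	pthread_mutex_lock(&rtnl);
	err = port_enable_locked(port);
	pthread_mutex_unlock(&rtnl);
	return err;
}

int main(void)
{
	return port_enable(1);	/* compile with -lpthread */
}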
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 088c886e609e..c5beb3031a72 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -88,12 +88,10 @@ static int dsa_slave_open(struct net_device *dev)
goto clear_allmulti;
}
- err = dsa_port_enable(dp, dev->phydev);
+ err = dsa_port_enable_rt(dp, dev->phydev);
if (err)
goto clear_promisc;
- phylink_start(dp->pl);
-
return 0;
clear_promisc:
@@ -114,9 +112,7 @@ static int dsa_slave_close(struct net_device *dev)
struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
- phylink_stop(dp->pl);
-
- dsa_port_disable(dp);
+ dsa_port_disable_rt(dp);
dev_mc_unsync(master, dev);
dev_uc_unsync(master, dev);
@@ -865,6 +861,10 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
if (!flow_offload_has_one_action(&cls->rule->action))
return err;
+ if (!flow_action_basic_hw_stats_types_check(&cls->rule->action,
+ cls->common.extack))
+ return err;
+
act = &cls->rule->action.entries[0];
if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
@@ -946,6 +946,64 @@ static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
}
}
+static int dsa_slave_add_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->cls_flower_add)
+ return -EOPNOTSUPP;
+
+ return ds->ops->cls_flower_add(ds, port, cls, ingress);
+}
+
+static int dsa_slave_del_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->cls_flower_del)
+ return -EOPNOTSUPP;
+
+ return ds->ops->cls_flower_del(ds, port, cls, ingress);
+}
+
+static int dsa_slave_stats_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->cls_flower_stats)
+ return -EOPNOTSUPP;
+
+ return ds->ops->cls_flower_stats(ds, port, cls, ingress);
+}
+
+static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ switch (cls->command) {
+ case FLOW_CLS_REPLACE:
+ return dsa_slave_add_cls_flower(dev, cls, ingress);
+ case FLOW_CLS_DESTROY:
+ return dsa_slave_del_cls_flower(dev, cls, ingress);
+ case FLOW_CLS_STATS:
+ return dsa_slave_stats_cls_flower(dev, cls, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv, bool ingress)
{
@@ -957,6 +1015,8 @@ static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSMATCHALL:
return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
+ case TC_SETUP_CLSFLOWER:
+ return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
default:
return -EOPNOTSUPP;
}
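The cls_flower glue added here is pure dispatch: each FLOW_CLS_* command maps to an optional switch-driver hook, and a missing hook yields -EOPNOTSUPP so the stack can fall back to software classification. A standalone model of that optional-ops pattern:

#include <stdio.h>
#include <errno.h>

struct ops {
	/* optional hooks, like ds->ops->cls_flower_add/del/stats */
	int (*add)(int port);
	int (*del)(int port);
};

static int do_add(int port) { printf("add on %d\n", port); return 0; }

static int dispatch_add(const struct ops *ops, int port)
{
	if (!ops->add)
		return -EOPNOTSUPP;	/* driver opted out */
	return ops->add(port);
}

int main(void)
{
	struct ops with = { .add = do_add }, without = { 0 };

	printf("%d\n", dispatch_add(&with, 1));		/* 0 */
	printf("%d\n", dispatch_add(&without, 1));	/* -95 on Linux */
	return 0;
}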
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index 8e3e7283d430..59de1315100f 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -153,7 +153,8 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
memset(injection, 0, OCELOT_TAG_LEN);
- src = dsa_upstream_port(ds, port);
+ /* Set the source port as the CPU port module and not the NPI port */
+ src = ocelot->num_phys_ports;
dest = BIT(port);
bypass = true;
qos_class = skb->priority;
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile
index 424545a4aaec..b0bd3decad02 100644
--- a/net/ethtool/Makefile
+++ b/net/ethtool/Makefile
@@ -5,4 +5,5 @@ obj-y += ioctl.o common.o
obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o
ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \
- linkstate.o debug.o wol.o
+ linkstate.o debug.o wol.o features.o privflags.o rings.o \
+ channels.o
diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c
index ef9197541cb3..dae7402eaca3 100644
--- a/net/ethtool/bitset.c
+++ b/net/ethtool/bitset.c
@@ -588,6 +588,100 @@ int ethnl_update_bitset32(u32 *bitmap, unsigned int nbits,
return 0;
}
+/**
+ * ethnl_parse_bitset() - Compute effective value and mask from bitset nest
+ * @val: unsigned long based bitmap to put value into
+ * @mask: unsigned long based bitmap to put mask into
+ * @nbits: size of @val and @mask bitmaps
+ * @attr: nest attribute to parse and apply
+ * @names: array of bit names; may be null for compact format
+ * @extack: extack for error reporting
+ *
+ * Provide @nbits size long bitmaps for value and mask so that
+ * x = (val & mask) | (x & ~mask) would modify any @nbits sized bitmap x
+ * the same way ethnl_update_bitset() with the same bitset attribute would.
+ *
+ * Return: negative error code on failure, 0 on success
+ */
+int ethnl_parse_bitset(unsigned long *val, unsigned long *mask,
+ unsigned int nbits, const struct nlattr *attr,
+ ethnl_string_array_t names,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[ETHTOOL_A_BITSET_MAX + 1];
+ const struct nlattr *bit_attr;
+ bool no_mask;
+ int rem;
+ int ret;
+
+ if (!attr)
+ return 0;
+ ret = nla_parse_nested(tb, ETHTOOL_A_BITSET_MAX, attr, bitset_policy,
+ extack);
+ if (ret < 0)
+ return ret;
+ no_mask = tb[ETHTOOL_A_BITSET_NOMASK];
+
+ if (!tb[ETHTOOL_A_BITSET_BITS]) {
+ unsigned int change_bits;
+
+ ret = ethnl_compact_sanity_checks(nbits, attr, tb, extack);
+ if (ret < 0)
+ return ret;
+
+ change_bits = nla_get_u32(tb[ETHTOOL_A_BITSET_SIZE]);
+ bitmap_from_arr32(val, nla_data(tb[ETHTOOL_A_BITSET_VALUE]),
+ change_bits);
+ if (change_bits < nbits)
+ bitmap_clear(val, change_bits, nbits - change_bits);
+ if (no_mask) {
+ bitmap_fill(mask, nbits);
+ } else {
+ bitmap_from_arr32(mask,
+ nla_data(tb[ETHTOOL_A_BITSET_MASK]),
+ change_bits);
+ if (change_bits < nbits)
+ bitmap_clear(mask, change_bits,
+ nbits - change_bits);
+ }
+
+ return 0;
+ }
+
+ if (tb[ETHTOOL_A_BITSET_VALUE]) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_BITSET_VALUE],
+ "value only allowed in compact bitset");
+ return -EINVAL;
+ }
+ if (tb[ETHTOOL_A_BITSET_MASK]) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_BITSET_MASK],
+ "mask only allowed in compact bitset");
+ return -EINVAL;
+ }
+
+ bitmap_zero(val, nbits);
+ if (no_mask)
+ bitmap_fill(mask, nbits);
+ else
+ bitmap_zero(mask, nbits);
+
+ nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) {
+ unsigned int idx;
+ bool bit_val;
+
+ ret = ethnl_parse_bit(&idx, &bit_val, nbits, bit_attr, no_mask,
+ names, extack);
+ if (ret < 0)
+ return ret;
+ if (bit_val)
+ __set_bit(idx, val);
+ if (!no_mask)
+ __set_bit(idx, mask);
+ }
+
+ return 0;
+}
+
#if BITS_PER_LONG == 64 && defined(__BIG_ENDIAN)
/* 64-bit big endian architectures are the only case when u32 based bitmaps
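The kernel-doc above pins down ethnl_parse_bitset()'s contract: given the returned val/mask pair, applying x = (val & mask) | (x & ~mask) to any bitmap x reproduces what ethnl_update_bitset() would have done with the same attribute. In single-word form:

#include <stdio.h>

/* applies a parsed value/mask pair to one bitmap word, per the
 * ethnl_parse_bitset() kernel-doc
 */
static unsigned long apply(unsigned long x, unsigned long val,
			   unsigned long mask)
{
	return (val & mask) | (x & ~mask);
}

int main(void)
{
	/* x = 0110b: set bit 0, clear bit 2, leave bits 1 and 3 alone */
	printf("%#lx\n", apply(0x6, 0x1, 0x5));	/* prints 0x3 (0011b) */
	return 0;
}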
diff --git a/net/ethtool/bitset.h b/net/ethtool/bitset.h
index b849f9d19676..c2c2e0051d00 100644
--- a/net/ethtool/bitset.h
+++ b/net/ethtool/bitset.h
@@ -26,5 +26,9 @@ int ethnl_update_bitset(unsigned long *bitmap, unsigned int nbits,
int ethnl_update_bitset32(u32 *bitmap, unsigned int nbits,
const struct nlattr *attr, ethnl_string_array_t names,
struct netlink_ext_ack *extack, bool *mod);
+int ethnl_parse_bitset(unsigned long *val, unsigned long *mask,
+ unsigned int nbits, const struct nlattr *attr,
+ ethnl_string_array_t names,
+ struct netlink_ext_ack *extack);
#endif /* _NET_ETHTOOL_BITSET_H */
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
new file mode 100644
index 000000000000..389924b65d05
--- /dev/null
+++ b/net/ethtool/channels.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <net/xdp_sock.h>
+
+#include "netlink.h"
+#include "common.h"
+
+struct channels_req_info {
+ struct ethnl_req_info base;
+};
+
+struct channels_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_channels channels;
+};
+
+#define CHANNELS_REPDATA(__reply_base) \
+ container_of(__reply_base, struct channels_reply_data, base)
+
+static const struct nla_policy
+channels_get_policy[ETHTOOL_A_CHANNELS_MAX + 1] = {
+ [ETHTOOL_A_CHANNELS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_CHANNELS_RX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_TX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_OTHER_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_COMBINED_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_RX_COUNT] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_TX_COUNT] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_OTHER_COUNT] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_COMBINED_COUNT] = { .type = NLA_REJECT },
+};
+
+static int channels_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct channels_reply_data *data = CHANNELS_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ if (!dev->ethtool_ops->get_channels)
+ return -EOPNOTSUPP;
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ dev->ethtool_ops->get_channels(dev, &data->channels);
+ ethnl_ops_complete(dev);
+
+ return 0;
+}
+
+static int channels_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ return nla_total_size(sizeof(u32)) + /* _CHANNELS_RX_MAX */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_TX_MAX */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_OTHER_MAX */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_COMBINED_MAX */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_RX_COUNT */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_TX_COUNT */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_OTHER_COUNT */
+ nla_total_size(sizeof(u32)); /* _CHANNELS_COMBINED_COUNT */
+}
+
+static int channels_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct channels_reply_data *data = CHANNELS_REPDATA(reply_base);
+ const struct ethtool_channels *channels = &data->channels;
+
+ if ((channels->max_rx &&
+ (nla_put_u32(skb, ETHTOOL_A_CHANNELS_RX_MAX,
+ channels->max_rx) ||
+ nla_put_u32(skb, ETHTOOL_A_CHANNELS_RX_COUNT,
+ channels->rx_count))) ||
+ (channels->max_tx &&
+ (nla_put_u32(skb, ETHTOOL_A_CHANNELS_TX_MAX,
+ channels->max_tx) ||
+ nla_put_u32(skb, ETHTOOL_A_CHANNELS_TX_COUNT,
+ channels->tx_count))) ||
+ (channels->max_other &&
+ (nla_put_u32(skb, ETHTOOL_A_CHANNELS_OTHER_MAX,
+ channels->max_other) ||
+ nla_put_u32(skb, ETHTOOL_A_CHANNELS_OTHER_COUNT,
+ channels->other_count))) ||
+ (channels->max_combined &&
+ (nla_put_u32(skb, ETHTOOL_A_CHANNELS_COMBINED_MAX,
+ channels->max_combined) ||
+ nla_put_u32(skb, ETHTOOL_A_CHANNELS_COMBINED_COUNT,
+ channels->combined_count))))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+const struct ethnl_request_ops ethnl_channels_request_ops = {
+ .request_cmd = ETHTOOL_MSG_CHANNELS_GET,
+ .reply_cmd = ETHTOOL_MSG_CHANNELS_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_CHANNELS_HEADER,
+ .max_attr = ETHTOOL_A_CHANNELS_MAX,
+ .req_info_size = sizeof(struct channels_req_info),
+ .reply_data_size = sizeof(struct channels_reply_data),
+ .request_policy = channels_get_policy,
+
+ .prepare_data = channels_prepare_data,
+ .reply_size = channels_reply_size,
+ .fill_reply = channels_fill_reply,
+};
+
+/* CHANNELS_SET */
+
+static const struct nla_policy
+channels_set_policy[ETHTOOL_A_CHANNELS_MAX + 1] = {
+ [ETHTOOL_A_CHANNELS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_CHANNELS_RX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_TX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_OTHER_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_COMBINED_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_RX_COUNT] = { .type = NLA_U32 },
+ [ETHTOOL_A_CHANNELS_TX_COUNT] = { .type = NLA_U32 },
+ [ETHTOOL_A_CHANNELS_OTHER_COUNT] = { .type = NLA_U32 },
+ [ETHTOOL_A_CHANNELS_COMBINED_COUNT] = { .type = NLA_U32 },
+};
+
+int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ETHTOOL_A_CHANNELS_MAX + 1];
+ unsigned int from_channel, old_total, i;
+ struct ethtool_channels channels = {};
+ struct ethnl_req_info req_info = {};
+ const struct nlattr *err_attr;
+ const struct ethtool_ops *ops;
+ struct net_device *dev;
+ u32 max_rx_in_use = 0;
+ bool mod = false;
+ int ret;
+
+ ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
+ ETHTOOL_A_CHANNELS_MAX, channels_set_policy,
+ info->extack);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_CHANNELS_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+ ops = dev->ethtool_ops;
+ ret = -EOPNOTSUPP;
+ if (!ops->get_channels || !ops->set_channels)
+ goto out_dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+ ops->get_channels(dev, &channels);
+ old_total = channels.combined_count +
+ max(channels.rx_count, channels.tx_count);
+
+ ethnl_update_u32(&channels.rx_count, tb[ETHTOOL_A_CHANNELS_RX_COUNT],
+ &mod);
+ ethnl_update_u32(&channels.tx_count, tb[ETHTOOL_A_CHANNELS_TX_COUNT],
+ &mod);
+ ethnl_update_u32(&channels.other_count,
+ tb[ETHTOOL_A_CHANNELS_OTHER_COUNT], &mod);
+ ethnl_update_u32(&channels.combined_count,
+ tb[ETHTOOL_A_CHANNELS_COMBINED_COUNT], &mod);
+ ret = 0;
+ if (!mod)
+ goto out_ops;
+
+ /* ensure new channel counts are within limits */
+ if (channels.rx_count > channels.max_rx)
+ err_attr = tb[ETHTOOL_A_CHANNELS_RX_COUNT];
+ else if (channels.tx_count > channels.max_tx)
+ err_attr = tb[ETHTOOL_A_CHANNELS_TX_COUNT];
+ else if (channels.other_count > channels.max_other)
+ err_attr = tb[ETHTOOL_A_CHANNELS_OTHER_COUNT];
+ else if (channels.combined_count > channels.max_combined)
+ err_attr = tb[ETHTOOL_A_CHANNELS_COMBINED_COUNT];
+ else
+ err_attr = NULL;
+ if (err_attr) {
+ ret = -EINVAL;
+ NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
+ "requested channel count exceeds maximum");
+ goto out_ops;
+ }
+
+ /* ensure the new Rx count fits within the configured Rx flow
+ * indirection table settings
+ */
+ if (netif_is_rxfh_configured(dev) &&
+ !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
+ (channels.combined_count + channels.rx_count) <= max_rx_in_use) {
+ GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
+		ret = -EINVAL;
+		goto out_ops;
+ }
+
+	/* When disabling channels, check for zero-copy AF_XDP sockets in use */
+ from_channel = channels.combined_count +
+ min(channels.rx_count, channels.tx_count);
+ for (i = from_channel; i < old_total; i++)
+ if (xdp_get_umem_from_qid(dev, i)) {
+ GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
+			ret = -EINVAL;
+			goto out_ops;
+ }
+
+ ret = dev->ethtool_ops->set_channels(dev, &channels);
+ if (ret < 0)
+ goto out_ops;
+ ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF, NULL);
+
+out_ops:
+ ethnl_ops_complete(dev);
+out_rtnl:
+ rtnl_unlock();
+out_dev:
+ dev_put(dev);
+ return ret;
+}
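ethnl_set_channels() leaves the real limits to the driver's get_channels()/set_channels() pair: the core only checks the request against the maxima the driver reported, plus the RSS-indirection and AF_XDP constraints. A kernel-style sketch of the driver side; the priv_* helpers and the limit of 8 are invented for illustration:

/* illustrative driver callbacks; limits are made up for the sketch */
static void sketch_get_channels(struct net_device *dev,
				struct ethtool_channels *ch)
{
	ch->max_combined = 8;	/* hypothetical hardware queue pairs */
	ch->combined_count = priv_combined_count(dev);	/* invented helper */
}

static int sketch_set_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	/* by the time this runs, the core has already validated
	 * ch->combined_count against max_combined, the RSS indirection
	 * table and zero-copy AF_XDP sockets
	 */
	return priv_reconfigure_queues(dev, ch->combined_count);
}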
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 7b6969af5ae7..0b22741b2f8f 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -258,3 +258,34 @@ int __ethtool_get_link(struct net_device *dev)
return netif_running(dev) && dev->ethtool_ops->get_link(dev);
}
+
+int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
+{
+ u32 dev_size, current_max = 0;
+ u32 *indir;
+ int ret;
+
+ if (!dev->ethtool_ops->get_rxfh_indir_size ||
+ !dev->ethtool_ops->get_rxfh)
+ return -EOPNOTSUPP;
+ dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+ if (dev_size == 0)
+ return -EOPNOTSUPP;
+
+ indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
+ if (!indir)
+ return -ENOMEM;
+
+ ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
+ if (ret)
+ goto out;
+
+ while (dev_size--)
+ current_max = max(current_max, indir[dev_size]);
+
+ *max = current_max;
+
+out:
+ kfree(indir);
+ return ret;
+}
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index 40ba74e0b9bb..03946e16e623 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -6,6 +6,8 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
+#define ETHTOOL_DEV_FEATURE_WORDS DIV_ROUND_UP(NETDEV_FEATURE_COUNT, 32)
+
/* compose link mode index from speed, type and duplex */
#define ETHTOOL_LINK_MODE(speed, type, duplex) \
ETHTOOL_LINK_MODE_ ## speed ## base ## type ## _ ## duplex ## _BIT
@@ -27,5 +29,6 @@ int __ethtool_get_link(struct net_device *dev);
bool convert_legacy_settings_to_link_ksettings(
struct ethtool_link_ksettings *link_ksettings,
const struct ethtool_cmd *legacy_settings);
+int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max);
#endif /* _ETHTOOL_COMMON_H */
diff --git a/net/ethtool/debug.c b/net/ethtool/debug.c
index aaef4843e6ba..87f288ee20c8 100644
--- a/net/ethtool/debug.c
+++ b/net/ethtool/debug.c
@@ -102,8 +102,10 @@ int ethnl_set_debug(struct sk_buff *skb, struct genl_info *info)
info->extack);
if (ret < 0)
return ret;
- ret = ethnl_parse_header(&req_info, tb[ETHTOOL_A_DEBUG_HEADER],
- genl_info_net(info), info->extack, true);
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_DEBUG_HEADER],
+ genl_info_net(info), info->extack,
+ true);
if (ret < 0)
return ret;
dev = req_info.dev;
diff --git a/net/ethtool/features.c b/net/ethtool/features.c
new file mode 100644
index 000000000000..4e632dc987d8
--- /dev/null
+++ b/net/ethtool/features.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "netlink.h"
+#include "common.h"
+#include "bitset.h"
+
+struct features_req_info {
+ struct ethnl_req_info base;
+};
+
+struct features_reply_data {
+ struct ethnl_reply_data base;
+ u32 hw[ETHTOOL_DEV_FEATURE_WORDS];
+ u32 wanted[ETHTOOL_DEV_FEATURE_WORDS];
+ u32 active[ETHTOOL_DEV_FEATURE_WORDS];
+ u32 nochange[ETHTOOL_DEV_FEATURE_WORDS];
+ u32 all[ETHTOOL_DEV_FEATURE_WORDS];
+};
+
+#define FEATURES_REPDATA(__reply_base) \
+ container_of(__reply_base, struct features_reply_data, base)
+
+static const struct nla_policy
+features_get_policy[ETHTOOL_A_FEATURES_MAX + 1] = {
+ [ETHTOOL_A_FEATURES_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_FEATURES_HW] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_WANTED] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_ACTIVE] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_NOCHANGE] = { .type = NLA_REJECT },
+};
+
+static void ethnl_features_to_bitmap32(u32 *dest, netdev_features_t src)
+{
+ unsigned int i;
+
+ for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; i++)
+ dest[i] = src >> (32 * i);
+}
+
+static int features_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct features_reply_data *data = FEATURES_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ netdev_features_t all_features;
+
+ ethnl_features_to_bitmap32(data->hw, dev->hw_features);
+ ethnl_features_to_bitmap32(data->wanted, dev->wanted_features);
+ ethnl_features_to_bitmap32(data->active, dev->features);
+ ethnl_features_to_bitmap32(data->nochange, NETIF_F_NEVER_CHANGE);
+ all_features = GENMASK_ULL(NETDEV_FEATURE_COUNT - 1, 0);
+ ethnl_features_to_bitmap32(data->all, all_features);
+
+ return 0;
+}
+
+static int features_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct features_reply_data *data = FEATURES_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ unsigned int len = 0;
+ int ret;
+
+ ret = ethnl_bitset32_size(data->hw, data->all, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ ret = ethnl_bitset32_size(data->wanted, NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ ret = ethnl_bitset32_size(data->active, NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ ret = ethnl_bitset32_size(data->nochange, NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ len += ret;
+
+ return len;
+}
+
+static int features_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct features_reply_data *data = FEATURES_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ int ret;
+
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_HW, data->hw,
+ data->all, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_WANTED, data->wanted,
+ NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_ACTIVE, data->active,
+ NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ return ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_NOCHANGE,
+ data->nochange, NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+}
+
+const struct ethnl_request_ops ethnl_features_request_ops = {
+ .request_cmd = ETHTOOL_MSG_FEATURES_GET,
+ .reply_cmd = ETHTOOL_MSG_FEATURES_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_FEATURES_HEADER,
+ .max_attr = ETHTOOL_A_FEATURES_MAX,
+ .req_info_size = sizeof(struct features_req_info),
+ .reply_data_size = sizeof(struct features_reply_data),
+ .request_policy = features_get_policy,
+
+ .prepare_data = features_prepare_data,
+ .reply_size = features_reply_size,
+ .fill_reply = features_fill_reply,
+};
+
+/* FEATURES_SET */
+
+static const struct nla_policy
+features_set_policy[ETHTOOL_A_FEATURES_MAX + 1] = {
+ [ETHTOOL_A_FEATURES_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_FEATURES_HW] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_WANTED] = { .type = NLA_NESTED },
+ [ETHTOOL_A_FEATURES_ACTIVE] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_NOCHANGE] = { .type = NLA_REJECT },
+};
+
+static void ethnl_features_to_bitmap(unsigned long *dest, netdev_features_t val)
+{
+ const unsigned int words = BITS_TO_LONGS(NETDEV_FEATURE_COUNT);
+ unsigned int i;
+
+ bitmap_zero(dest, NETDEV_FEATURE_COUNT);
+ for (i = 0; i < words; i++)
+ dest[i] = (unsigned long)(val >> (i * BITS_PER_LONG));
+}
+
+static netdev_features_t ethnl_bitmap_to_features(unsigned long *src)
+{
+ const unsigned int nft_bits = sizeof(netdev_features_t) * BITS_PER_BYTE;
+ const unsigned int words = BITS_TO_LONGS(NETDEV_FEATURE_COUNT);
+ netdev_features_t ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < words; i++)
+ ret |= (netdev_features_t)(src[i]) << (i * BITS_PER_LONG);
+ ret &= ~(netdev_features_t)0 >> (nft_bits - NETDEV_FEATURE_COUNT);
+ return ret;
+}
+
+static int features_send_reply(struct net_device *dev, struct genl_info *info,
+ const unsigned long *wanted,
+ const unsigned long *wanted_mask,
+ const unsigned long *active,
+ const unsigned long *active_mask, bool compact)
+{
+ struct sk_buff *rskb;
+ void *reply_payload;
+ int reply_len = 0;
+ int ret;
+
+ reply_len = ethnl_reply_header_size();
+ ret = ethnl_bitset_size(wanted, wanted_mask, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ goto err;
+ reply_len += ret;
+ ret = ethnl_bitset_size(active, active_mask, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ goto err;
+ reply_len += ret;
+
+ ret = -ENOMEM;
+ rskb = ethnl_reply_init(reply_len, dev, ETHTOOL_MSG_FEATURES_SET_REPLY,
+ ETHTOOL_A_FEATURES_HEADER, info,
+ &reply_payload);
+ if (!rskb)
+ goto err;
+
+ ret = ethnl_put_bitset(rskb, ETHTOOL_A_FEATURES_WANTED, wanted,
+ wanted_mask, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ goto nla_put_failure;
+ ret = ethnl_put_bitset(rskb, ETHTOOL_A_FEATURES_ACTIVE, active,
+ active_mask, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ goto nla_put_failure;
+
+ genlmsg_end(rskb, reply_payload);
+ ret = genlmsg_reply(rskb, info);
+ return ret;
+
+nla_put_failure:
+ nlmsg_free(rskb);
+ WARN_ONCE(1, "calculated message payload length (%d) not sufficient\n",
+ reply_len);
+err:
+ GENL_SET_ERR_MSG(info, "failed to send reply message");
+ return ret;
+}
+
+int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
+{
+ DECLARE_BITMAP(wanted_diff_mask, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(active_diff_mask, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(old_active, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(new_active, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(req_wanted, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(req_mask, NETDEV_FEATURE_COUNT);
+ struct nlattr *tb[ETHTOOL_A_FEATURES_MAX + 1];
+ struct ethnl_req_info req_info = {};
+ struct net_device *dev;
+ bool mod;
+ int ret;
+
+ ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
+ ETHTOOL_A_FEATURES_MAX, features_set_policy,
+ info->extack);
+ if (ret < 0)
+ return ret;
+ if (!tb[ETHTOOL_A_FEATURES_WANTED])
+ return -EINVAL;
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_FEATURES_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+
+ rtnl_lock();
+ ethnl_features_to_bitmap(old_active, dev->features);
+ ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT,
+ tb[ETHTOOL_A_FEATURES_WANTED],
+ netdev_features_strings, info->extack);
+ if (ret < 0)
+ goto out_rtnl;
+ if (ethnl_bitmap_to_features(req_mask) & ~NETIF_F_ETHTOOL_BITS) {
+ GENL_SET_ERR_MSG(info, "attempt to change non-ethtool features");
+ ret = -EINVAL;
+ goto out_rtnl;
+ }
+
+ /* set req_wanted bits not in req_mask from old_active */
+ bitmap_and(req_wanted, req_wanted, req_mask, NETDEV_FEATURE_COUNT);
+ bitmap_andnot(new_active, old_active, req_mask, NETDEV_FEATURE_COUNT);
+ bitmap_or(req_wanted, new_active, req_wanted, NETDEV_FEATURE_COUNT);
+ if (bitmap_equal(req_wanted, old_active, NETDEV_FEATURE_COUNT)) {
+ ret = 0;
+ goto out_rtnl;
+ }
+
+ dev->wanted_features = ethnl_bitmap_to_features(req_wanted);
+ __netdev_update_features(dev);
+ ethnl_features_to_bitmap(new_active, dev->features);
+ mod = !bitmap_equal(old_active, new_active, NETDEV_FEATURE_COUNT);
+
+ ret = 0;
+ if (!(req_info.flags & ETHTOOL_FLAG_OMIT_REPLY)) {
+ bool compact = req_info.flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+
+ bitmap_xor(wanted_diff_mask, req_wanted, new_active,
+ NETDEV_FEATURE_COUNT);
+ bitmap_xor(active_diff_mask, old_active, new_active,
+ NETDEV_FEATURE_COUNT);
+ bitmap_and(wanted_diff_mask, wanted_diff_mask, req_mask,
+ NETDEV_FEATURE_COUNT);
+ bitmap_and(req_wanted, req_wanted, wanted_diff_mask,
+ NETDEV_FEATURE_COUNT);
+ bitmap_and(new_active, new_active, active_diff_mask,
+ NETDEV_FEATURE_COUNT);
+
+ ret = features_send_reply(dev, info, req_wanted,
+ wanted_diff_mask, new_active,
+ active_diff_mask, compact);
+ }
+ if (mod)
+ ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF, NULL);
+
+out_rtnl:
+ rtnl_unlock();
+ dev_put(dev);
+ return ret;
+}
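The bitmap juggling in ethnl_set_features() reduces to one merge: bits named in the request replace the corresponding bits of the currently active set, and everything else carries over. In u64 form (on 64-bit, NETDEV_FEATURE_COUNT fits one word, which is what the word-by-word loops above generalize):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t old_active = 0xf0;	/* current dev->features         */
	uint64_t req_wanted = 0x0f;	/* values from the WANTED bitset */
	uint64_t req_mask   = 0x3c;	/* bits the request names        */

	/* mirrors the bitmap_and/andnot/or sequence in the diff */
	uint64_t wanted = (req_wanted & req_mask) |
			  (old_active & ~req_mask);

	printf("%#llx\n", (unsigned long long)wanted);	/* 0xcc */
	return 0;
}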
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index b987052d91ef..258840b19fb5 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -56,8 +56,6 @@ EXPORT_SYMBOL(ethtool_op_get_ts_info);
/* Handlers for each ethtool command */
-#define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32)
-
static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
{
struct ethtool_gfeatures cmd = {
@@ -198,13 +196,14 @@ static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd)
switch (eth_cmd) {
case ETHTOOL_GTXCSUM:
case ETHTOOL_STXCSUM:
- return NETIF_F_CSUM_MASK | NETIF_F_SCTP_CRC;
+		return NETIF_F_CSUM_MASK | NETIF_F_FCOE_CRC |
+		       NETIF_F_SCTP_CRC;
case ETHTOOL_GRXCSUM:
case ETHTOOL_SRXCSUM:
return NETIF_F_RXCSUM;
case ETHTOOL_GSG:
case ETHTOOL_SSG:
- return NETIF_F_SG;
+ return NETIF_F_SG | NETIF_F_FRAGLIST;
case ETHTOOL_GTSO:
case ETHTOOL_STSO:
return NETIF_F_ALL_TSO;
@@ -459,6 +458,24 @@ static int load_link_ksettings_from_user(struct ethtool_link_ksettings *to,
return 0;
}
+/* Check if the user is trying to change anything besides speed/duplex */
+bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd)
+{
+ struct ethtool_link_settings base2 = {};
+
+ base2.speed = cmd->base.speed;
+ base2.port = PORT_OTHER;
+ base2.duplex = cmd->base.duplex;
+ base2.cmd = cmd->base.cmd;
+ base2.link_mode_masks_nwords = cmd->base.link_mode_masks_nwords;
+
+ return !memcmp(&base2, &cmd->base, sizeof(base2)) &&
+ bitmap_empty(cmd->link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS) &&
+ bitmap_empty(cmd->link_modes.lp_advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
/* convert a kernel internal ethtool_link_ksettings to
* ethtool_link_usettings in user space. return 0 on success, errno on
* error.
@@ -581,6 +598,27 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
return err;
}
+int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd,
+ u32 *dev_speed, u8 *dev_duplex)
+{
+ u32 speed;
+ u8 duplex;
+
+ speed = cmd->base.speed;
+ duplex = cmd->base.duplex;
+ /* don't allow custom speed and duplex */
+ if (!ethtool_validate_speed(speed) ||
+ !ethtool_validate_duplex(duplex) ||
+ !ethtool_virtdev_validate_cmd(cmd))
+ return -EINVAL;
+ *dev_speed = speed;
+ *dev_duplex = duplex;
+
+ return 0;
+}
+EXPORT_SYMBOL(ethtool_virtdev_set_link_ksettings);
+
/* Query device for its ethtool_cmd settings.
*
* Backward compatibility note: for compatibility with legacy ethtool, this is
@@ -891,37 +929,6 @@ void netdev_rss_key_fill(void *buffer, size_t len)
}
EXPORT_SYMBOL(netdev_rss_key_fill);
-static int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
-{
- u32 dev_size, current_max = 0;
- u32 *indir;
- int ret;
-
- if (!dev->ethtool_ops->get_rxfh_indir_size ||
- !dev->ethtool_ops->get_rxfh)
- return -EOPNOTSUPP;
- dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
- if (dev_size == 0)
- return -EOPNOTSUPP;
-
- indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
- if (!indir)
- return -ENOMEM;
-
- ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
- if (ret)
- goto out;
-
- while (dev_size--)
- current_max = max(current_max, indir[dev_size]);
-
- *max = current_max;
-
-out:
- kfree(indir);
- return ret;
-}
-
static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
void __user *useraddr)
{
@@ -1505,6 +1512,64 @@ static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev,
return 0;
}
+static bool
+ethtool_set_coalesce_supported(struct net_device *dev,
+ struct ethtool_coalesce *coalesce)
+{
+ u32 supported_params = dev->ethtool_ops->supported_coalesce_params;
+ u32 nonzero_params = 0;
+
+ if (!supported_params)
+ return true;
+
+ if (coalesce->rx_coalesce_usecs)
+ nonzero_params |= ETHTOOL_COALESCE_RX_USECS;
+ if (coalesce->rx_max_coalesced_frames)
+ nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES;
+ if (coalesce->rx_coalesce_usecs_irq)
+ nonzero_params |= ETHTOOL_COALESCE_RX_USECS_IRQ;
+ if (coalesce->rx_max_coalesced_frames_irq)
+ nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ;
+ if (coalesce->tx_coalesce_usecs)
+ nonzero_params |= ETHTOOL_COALESCE_TX_USECS;
+ if (coalesce->tx_max_coalesced_frames)
+ nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES;
+ if (coalesce->tx_coalesce_usecs_irq)
+ nonzero_params |= ETHTOOL_COALESCE_TX_USECS_IRQ;
+ if (coalesce->tx_max_coalesced_frames_irq)
+ nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ;
+ if (coalesce->stats_block_coalesce_usecs)
+ nonzero_params |= ETHTOOL_COALESCE_STATS_BLOCK_USECS;
+ if (coalesce->use_adaptive_rx_coalesce)
+ nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_RX;
+ if (coalesce->use_adaptive_tx_coalesce)
+ nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_TX;
+ if (coalesce->pkt_rate_low)
+ nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_LOW;
+ if (coalesce->rx_coalesce_usecs_low)
+ nonzero_params |= ETHTOOL_COALESCE_RX_USECS_LOW;
+ if (coalesce->rx_max_coalesced_frames_low)
+ nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW;
+ if (coalesce->tx_coalesce_usecs_low)
+ nonzero_params |= ETHTOOL_COALESCE_TX_USECS_LOW;
+ if (coalesce->tx_max_coalesced_frames_low)
+ nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW;
+ if (coalesce->pkt_rate_high)
+ nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_HIGH;
+ if (coalesce->rx_coalesce_usecs_high)
+ nonzero_params |= ETHTOOL_COALESCE_RX_USECS_HIGH;
+ if (coalesce->rx_max_coalesced_frames_high)
+ nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH;
+ if (coalesce->tx_coalesce_usecs_high)
+ nonzero_params |= ETHTOOL_COALESCE_TX_USECS_HIGH;
+ if (coalesce->tx_max_coalesced_frames_high)
+ nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH;
+ if (coalesce->rate_sample_interval)
+ nonzero_params |= ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL;
+
+ return (supported_params & nonzero_params) == nonzero_params;
+}
+
static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
void __user *useraddr)
{
@@ -1516,6 +1581,9 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
return -EFAULT;
+ if (!ethtool_set_coalesce_supported(dev, &coalesce))
+ return -EOPNOTSUPP;
+
return dev->ethtool_ops->set_coalesce(dev, &coalesce);
}
@@ -1536,6 +1604,7 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
{
struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
+ int ret;
if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
return -EOPNOTSUPP;
@@ -1552,7 +1621,10 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
ringparam.tx_pending > max.tx_max_pending)
return -EINVAL;
- return dev->ethtool_ops->set_ringparam(dev, &ringparam);
+ ret = dev->ethtool_ops->set_ringparam(dev, &ringparam);
+ if (!ret)
+ ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
+ return ret;
}
static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
@@ -1577,6 +1649,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
u16 from_channel, to_channel;
u32 max_rx_in_use = 0;
unsigned int i;
+ int ret;
if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
return -EOPNOTSUPP;
@@ -1608,7 +1681,10 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
if (xdp_get_umem_from_qid(dev, i))
return -EINVAL;
- return dev->ethtool_ops->set_channels(dev, &channels);
+ ret = dev->ethtool_ops->set_channels(dev, &channels);
+ if (!ret)
+ ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF, NULL);
+ return ret;
}
static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
@@ -2297,6 +2373,11 @@ ethtool_set_per_queue_coalesce(struct net_device *dev,
goto roll_back;
}
+ if (!ethtool_set_coalesce_supported(dev, &coalesce)) {
+ ret = -EOPNOTSUPP;
+ goto roll_back;
+ }
+
ret = dev->ethtool_ops->set_per_queue_coalesce(dev, bit, &coalesce);
if (ret != 0)
goto roll_back;
@@ -2612,6 +2693,8 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
	case ETHTOOL_GPFLAGS:
		rc = ethtool_get_value(dev, useraddr, ethcmd,
				       dev->ethtool_ops->get_priv_flags);
		break;
	case ETHTOOL_SPFLAGS:
		rc = ethtool_set_value(dev, useraddr,
				       dev->ethtool_ops->set_priv_flags);
+		if (!rc)
+			ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF, NULL);
		break;
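supported_coalesce_params is an opt-in contract: a driver declares which ETHTOOL_COALESCE_* knobs it honors, and the core rejects any request that sets a field outside that mask instead of letting the driver silently ignore it. The subset test itself is one line; a standalone model with illustrative bit names:

#include <stdio.h>
#include <stdint.h>

#define COALESCE_RX_USECS	(1u << 0)
#define COALESCE_TX_USECS	(1u << 1)
#define COALESCE_ADAPTIVE_RX	(1u << 2)

int main(void)
{
	/* what the driver declared, like ops->supported_coalesce_params */
	uint32_t supported = COALESCE_RX_USECS | COALESCE_TX_USECS;
	/* bits derived from the nonzero fields of the user request */
	uint32_t nonzero = COALESCE_RX_USECS | COALESCE_ADAPTIVE_RX;

	/* same subset check as ethtool_set_coalesce_supported() */
	printf((supported & nonzero) == nonzero ? "ok\n" : "-EOPNOTSUPP\n");
	return 0;
}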
diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c
index 5d16cb4e8693..2df420068cbb 100644
--- a/net/ethtool/linkinfo.c
+++ b/net/ethtool/linkinfo.c
@@ -121,8 +121,10 @@ int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info)
info->extack);
if (ret < 0)
return ret;
- ret = ethnl_parse_header(&req_info, tb[ETHTOOL_A_LINKINFO_HEADER],
- genl_info_net(info), info->extack, true);
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_LINKINFO_HEADER],
+ genl_info_net(info), info->extack,
+ true);
if (ret < 0)
return ret;
dev = req_info.dev;
diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c
index f049b97072fe..cb29cc8c5960 100644
--- a/net/ethtool/linkmodes.c
+++ b/net/ethtool/linkmodes.c
@@ -334,8 +334,10 @@ int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info)
info->extack);
if (ret < 0)
return ret;
- ret = ethnl_parse_header(&req_info, tb[ETHTOOL_A_LINKMODES_HEADER],
- genl_info_net(info), info->extack, true);
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_LINKMODES_HEADER],
+ genl_info_net(info), info->extack,
+ true);
if (ret < 0)
return ret;
dev = req_info.dev;
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 180c194fab07..55c8ce4019d9 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -18,7 +18,7 @@ static const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_MAX + 1] = {
};
/**
- * ethnl_parse_header() - parse request header
+ * ethnl_parse_header_dev_get() - parse request header
* @req_info: structure to put results into
* @header: nest attribute with request header
* @net: request netns
@@ -33,9 +33,9 @@ static const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_MAX + 1] = {
*
* Return: 0 on success or negative error code
*/
-int ethnl_parse_header(struct ethnl_req_info *req_info,
- const struct nlattr *header, struct net *net,
- struct netlink_ext_ack *extack, bool require_dev)
+int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
+ const struct nlattr *header, struct net *net,
+ struct netlink_ext_ack *extack, bool require_dev)
{
struct nlattr *tb[ETHTOOL_A_HEADER_MAX + 1];
const struct nlattr *devname_attr;
@@ -215,6 +215,10 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
[ETHTOOL_MSG_LINKSTATE_GET] = &ethnl_linkstate_request_ops,
[ETHTOOL_MSG_DEBUG_GET] = &ethnl_debug_request_ops,
[ETHTOOL_MSG_WOL_GET] = &ethnl_wol_request_ops,
+ [ETHTOOL_MSG_FEATURES_GET] = &ethnl_features_request_ops,
+ [ETHTOOL_MSG_PRIVFLAGS_GET] = &ethnl_privflags_request_ops,
+ [ETHTOOL_MSG_RINGS_GET] = &ethnl_rings_request_ops,
+ [ETHTOOL_MSG_CHANNELS_GET] = &ethnl_channels_request_ops,
};
static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
@@ -253,8 +257,8 @@ static int ethnl_default_parse(struct ethnl_req_info *req_info,
request_ops->request_policy, extack);
if (ret < 0)
goto out;
- ret = ethnl_parse_header(req_info, tb[request_ops->hdr_attr], net,
- extack, require_dev);
+ ret = ethnl_parse_header_dev_get(req_info, tb[request_ops->hdr_attr],
+ net, extack, require_dev);
if (ret < 0)
goto out;
@@ -527,6 +531,10 @@ ethnl_default_notify_ops[ETHTOOL_MSG_KERNEL_MAX + 1] = {
[ETHTOOL_MSG_LINKMODES_NTF] = &ethnl_linkmodes_request_ops,
[ETHTOOL_MSG_DEBUG_NTF] = &ethnl_debug_request_ops,
[ETHTOOL_MSG_WOL_NTF] = &ethnl_wol_request_ops,
+ [ETHTOOL_MSG_FEATURES_NTF] = &ethnl_features_request_ops,
+ [ETHTOOL_MSG_PRIVFLAGS_NTF] = &ethnl_privflags_request_ops,
+ [ETHTOOL_MSG_RINGS_NTF] = &ethnl_rings_request_ops,
+ [ETHTOOL_MSG_CHANNELS_NTF] = &ethnl_channels_request_ops,
};
/* default notification handler */
@@ -612,6 +620,10 @@ static const ethnl_notify_handler_t ethnl_notify_handlers[] = {
[ETHTOOL_MSG_LINKMODES_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_DEBUG_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_WOL_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_FEATURES_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_PRIVFLAGS_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_RINGS_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_CHANNELS_NTF] = ethnl_default_notify,
};
void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data)
@@ -629,6 +641,29 @@ void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data)
}
EXPORT_SYMBOL(ethtool_notify);
+static void ethnl_notify_features(struct netdev_notifier_info *info)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(info);
+
+ ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF, NULL);
+}
+
+static int ethnl_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ switch (event) {
+ case NETDEV_FEAT_CHANGE:
+ ethnl_notify_features(ptr);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ethnl_netdev_notifier = {
+ .notifier_call = ethnl_netdev_event,
+};
+
/* genetlink setup */
static const struct genl_ops ethtool_genl_ops[] = {
@@ -695,6 +730,54 @@ static const struct genl_ops ethtool_genl_ops[] = {
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_wol,
},
+ {
+ .cmd = ETHTOOL_MSG_FEATURES_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
+ {
+ .cmd = ETHTOOL_MSG_FEATURES_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_features,
+ },
+ {
+ .cmd = ETHTOOL_MSG_PRIVFLAGS_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
+ {
+ .cmd = ETHTOOL_MSG_PRIVFLAGS_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_privflags,
+ },
+ {
+ .cmd = ETHTOOL_MSG_RINGS_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
+ {
+ .cmd = ETHTOOL_MSG_RINGS_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_rings,
+ },
+ {
+ .cmd = ETHTOOL_MSG_CHANNELS_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
+ {
+ .cmd = ETHTOOL_MSG_CHANNELS_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_channels,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
@@ -723,7 +806,9 @@ static int __init ethnl_init(void)
return ret;
ethnl_ok = true;
- return 0;
+ ret = register_netdevice_notifier(&ethnl_netdev_notifier);
+ WARN(ret < 0, "ethtool: net device notifier registration failed");
+ return ret;
}
subsys_initcall(ethnl_init);
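
The ethnl_netdev_notifier block registered above ties the generic netdev notifier chain to the new notification machinery: a NETDEV_FEAT_CHANGE event becomes an ETHTOOL_MSG_FEATURES_NTF message. A minimal plain-C sketch of the notifier-chain idea, with illustrative names only (this is not the kernel API):

#include <stdio.h>

struct notifier {
	int (*call)(struct notifier *nb, unsigned long event, void *data);
	struct notifier *next;
};

static struct notifier *chain;

static void notifier_register(struct notifier *nb)
{
	nb->next = chain;
	chain = nb;
}

static void notifier_call_chain(unsigned long event, void *data)
{
	struct notifier *nb;

	for (nb = chain; nb; nb = nb->next)
		nb->call(nb, event, data);
}

#define FEAT_CHANGE 1UL	/* illustrative event id */

static int on_event(struct notifier *nb, unsigned long event, void *data)
{
	if (event == FEAT_CHANGE)
		puts("features changed: emit notification");
	return 0;
}

static struct notifier nb = { .call = on_event };

int main(void)
{
	notifier_register(&nb);
	notifier_call_chain(FEAT_CHANGE, NULL);
	return 0;
}
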
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 60efd87686ad..45aad99a6021 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -10,9 +10,10 @@
struct ethnl_req_info;
-int ethnl_parse_header(struct ethnl_req_info *req_info,
- const struct nlattr *nest, struct net *net,
- struct netlink_ext_ack *extack, bool require_dev);
+int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
+ const struct nlattr *nest, struct net *net,
+ struct netlink_ext_ack *extack,
+ bool require_dev);
int ethnl_fill_reply_header(struct sk_buff *skb, struct net_device *dev,
u16 attrtype);
struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd,
@@ -336,10 +337,18 @@ extern const struct ethnl_request_ops ethnl_linkmodes_request_ops;
extern const struct ethnl_request_ops ethnl_linkstate_request_ops;
extern const struct ethnl_request_ops ethnl_debug_request_ops;
extern const struct ethnl_request_ops ethnl_wol_request_ops;
+extern const struct ethnl_request_ops ethnl_features_request_ops;
+extern const struct ethnl_request_ops ethnl_privflags_request_ops;
+extern const struct ethnl_request_ops ethnl_rings_request_ops;
+extern const struct ethnl_request_ops ethnl_channels_request_ops;
int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_debug(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_wol(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_features(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_privflags(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info);
#endif /* _NET_ETHTOOL_NETLINK_H */
diff --git a/net/ethtool/privflags.c b/net/ethtool/privflags.c
new file mode 100644
index 000000000000..e8f03b33db9b
--- /dev/null
+++ b/net/ethtool/privflags.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "netlink.h"
+#include "common.h"
+#include "bitset.h"
+
+struct privflags_req_info {
+ struct ethnl_req_info base;
+};
+
+struct privflags_reply_data {
+ struct ethnl_reply_data base;
+ const char (*priv_flag_names)[ETH_GSTRING_LEN];
+ unsigned int n_priv_flags;
+ u32 priv_flags;
+};
+
+#define PRIVFLAGS_REPDATA(__reply_base) \
+ container_of(__reply_base, struct privflags_reply_data, base)
+
+static const struct nla_policy
+privflags_get_policy[ETHTOOL_A_PRIVFLAGS_MAX + 1] = {
+ [ETHTOOL_A_PRIVFLAGS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_PRIVFLAGS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_PRIVFLAGS_FLAGS] = { .type = NLA_REJECT },
+};
+
+static int ethnl_get_priv_flags_info(struct net_device *dev,
+ unsigned int *count,
+ const char (**names)[ETH_GSTRING_LEN])
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ int nflags;
+
+ nflags = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS);
+ if (nflags < 0)
+ return nflags;
+
+ if (names) {
+ *names = kcalloc(nflags, ETH_GSTRING_LEN, GFP_KERNEL);
+ if (!*names)
+ return -ENOMEM;
+ ops->get_strings(dev, ETH_SS_PRIV_FLAGS, (u8 *)*names);
+ }
+
+ /* We can pass more than 32 private flags to userspace via netlink but
+ * we cannot get more with ethtool_ops::get_priv_flags(). Note that we
+ * must not adjust nflags before allocating the space for flag names
+ * as the buffer must be large enough for all flags.
+ */
+ if (WARN_ONCE(nflags > 32,
+ "device %s reports more than 32 private flags (%d)\n",
+ netdev_name(dev), nflags))
+ nflags = 32;
+ *count = nflags;
+
+ return 0;
+}
+
+static int privflags_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ const char (*names)[ETH_GSTRING_LEN];
+ const struct ethtool_ops *ops;
+ unsigned int nflags;
+ int ret;
+
+ ops = dev->ethtool_ops;
+ if (!ops->get_priv_flags || !ops->get_sset_count || !ops->get_strings)
+ return -EOPNOTSUPP;
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = ethnl_get_priv_flags_info(dev, &nflags, &names);
+ if (ret < 0)
+ goto out_ops;
+ data->priv_flags = ops->get_priv_flags(dev);
+ data->priv_flag_names = names;
+ data->n_priv_flags = nflags;
+
+out_ops:
+ ethnl_ops_complete(dev);
+ return ret;
+}
+
+static int privflags_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ const u32 all_flags = ~(u32)0 >> (32 - data->n_priv_flags);
+
+ return ethnl_bitset32_size(&data->priv_flags, &all_flags,
+ data->n_priv_flags,
+ data->priv_flag_names, compact);
+}
+
+static int privflags_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ const u32 all_flags = ~(u32)0 >> (32 - data->n_priv_flags);
+
+ return ethnl_put_bitset32(skb, ETHTOOL_A_PRIVFLAGS_FLAGS,
+ &data->priv_flags, &all_flags,
+ data->n_priv_flags, data->priv_flag_names,
+ compact);
+}
+
+static void privflags_cleanup_data(struct ethnl_reply_data *reply_data)
+{
+ struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_data);
+
+ kfree(data->priv_flag_names);
+}
+
+const struct ethnl_request_ops ethnl_privflags_request_ops = {
+ .request_cmd = ETHTOOL_MSG_PRIVFLAGS_GET,
+ .reply_cmd = ETHTOOL_MSG_PRIVFLAGS_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_PRIVFLAGS_HEADER,
+ .max_attr = ETHTOOL_A_PRIVFLAGS_MAX,
+ .req_info_size = sizeof(struct privflags_req_info),
+ .reply_data_size = sizeof(struct privflags_reply_data),
+ .request_policy = privflags_get_policy,
+
+ .prepare_data = privflags_prepare_data,
+ .reply_size = privflags_reply_size,
+ .fill_reply = privflags_fill_reply,
+ .cleanup_data = privflags_cleanup_data,
+};
+
+/* PRIVFLAGS_SET */
+
+static const struct nla_policy
+privflags_set_policy[ETHTOOL_A_PRIVFLAGS_MAX + 1] = {
+ [ETHTOOL_A_PRIVFLAGS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_PRIVFLAGS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_PRIVFLAGS_FLAGS] = { .type = NLA_NESTED },
+};
+
+int ethnl_set_privflags(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ETHTOOL_A_PRIVFLAGS_MAX + 1];
+ const char (*names)[ETH_GSTRING_LEN] = NULL;
+ struct ethnl_req_info req_info = {};
+ const struct ethtool_ops *ops;
+ struct net_device *dev;
+ unsigned int nflags;
+ bool mod = false;
+ bool compact;
+ u32 flags;
+ int ret;
+
+ ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
+ ETHTOOL_A_PRIVFLAGS_MAX, privflags_set_policy,
+ info->extack);
+ if (ret < 0)
+ return ret;
+ if (!tb[ETHTOOL_A_PRIVFLAGS_FLAGS])
+ return -EINVAL;
+ ret = ethnl_bitset_is_compact(tb[ETHTOOL_A_PRIVFLAGS_FLAGS], &compact);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_PRIVFLAGS_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+ ops = dev->ethtool_ops;
+ ret = -EOPNOTSUPP;
+ if (!ops->get_priv_flags || !ops->set_priv_flags ||
+ !ops->get_sset_count || !ops->get_strings)
+ goto out_dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+ ret = ethnl_get_priv_flags_info(dev, &nflags, compact ? NULL : &names);
+ if (ret < 0)
+ goto out_ops;
+ flags = ops->get_priv_flags(dev);
+
+ ret = ethnl_update_bitset32(&flags, nflags,
+ tb[ETHTOOL_A_PRIVFLAGS_FLAGS], names,
+ info->extack, &mod);
+ if (ret < 0 || !mod)
+ goto out_free;
+ ret = ops->set_priv_flags(dev, flags);
+ if (ret < 0)
+ goto out_free;
+ ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF, NULL);
+
+out_free:
+ kfree(names);
+out_ops:
+ ethnl_ops_complete(dev);
+out_rtnl:
+ rtnl_unlock();
+out_dev:
+ dev_put(dev);
+ return ret;
+}
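
Both privflags_reply_size() and privflags_fill_reply() build their all_flags mask as ~(u32)0 >> (32 - n_priv_flags), i.e. the n_priv_flags lowest bits set. A standalone sketch of that mask, assuming 1 <= n <= 32 (the 32-flag clamp in ethnl_get_priv_flags_info() guarantees the upper bound):

#include <inttypes.h>
#include <stdio.h>

/* Set the n least significant bits; valid for 1 <= n <= 32. */
static uint32_t low_bits_mask(unsigned int n)
{
	return ~(uint32_t)0 >> (32 - n);
}

int main(void)
{
	printf("0x%08" PRIx32 "\n", low_bits_mask(5));	/* 0x0000001f */
	return 0;
}
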
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
new file mode 100644
index 000000000000..5422526f4eef
--- /dev/null
+++ b/net/ethtool/rings.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "netlink.h"
+#include "common.h"
+
+struct rings_req_info {
+ struct ethnl_req_info base;
+};
+
+struct rings_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_ringparam ringparam;
+};
+
+#define RINGS_REPDATA(__reply_base) \
+ container_of(__reply_base, struct rings_reply_data, base)
+
+static const struct nla_policy
+rings_get_policy[ETHTOOL_A_RINGS_MAX + 1] = {
+ [ETHTOOL_A_RINGS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_RINGS_RX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_MINI_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_JUMBO_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_TX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_TX] = { .type = NLA_REJECT },
+};
+
+static int rings_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct rings_reply_data *data = RINGS_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ if (!dev->ethtool_ops->get_ringparam)
+ return -EOPNOTSUPP;
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ dev->ethtool_ops->get_ringparam(dev, &data->ringparam);
+ ethnl_ops_complete(dev);
+
+ return 0;
+}
+
+static int rings_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ return nla_total_size(sizeof(u32)) + /* _RINGS_RX_MAX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI_MAX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO_MAX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_TX_MAX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_RX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */
+ nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */
+ nla_total_size(sizeof(u32)); /* _RINGS_TX */
+}
+
+static int rings_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
+ const struct ethtool_ringparam *ringparam = &data->ringparam;
+
+ if ((ringparam->rx_max_pending &&
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
+ ringparam->rx_max_pending) ||
+ nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
+ ringparam->rx_pending))) ||
+ (ringparam->rx_mini_max_pending &&
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
+ ringparam->rx_mini_max_pending) ||
+ nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
+ ringparam->rx_mini_pending))) ||
+ (ringparam->rx_jumbo_max_pending &&
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
+ ringparam->rx_jumbo_max_pending) ||
+ nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
+ ringparam->rx_jumbo_pending))) ||
+ (ringparam->tx_max_pending &&
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
+ ringparam->tx_max_pending) ||
+ nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
+ ringparam->tx_pending))))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+const struct ethnl_request_ops ethnl_rings_request_ops = {
+ .request_cmd = ETHTOOL_MSG_RINGS_GET,
+ .reply_cmd = ETHTOOL_MSG_RINGS_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_RINGS_HEADER,
+ .max_attr = ETHTOOL_A_RINGS_MAX,
+ .req_info_size = sizeof(struct rings_req_info),
+ .reply_data_size = sizeof(struct rings_reply_data),
+ .request_policy = rings_get_policy,
+
+ .prepare_data = rings_prepare_data,
+ .reply_size = rings_reply_size,
+ .fill_reply = rings_fill_reply,
+};
+
+/* RINGS_SET */
+
+static const struct nla_policy
+rings_set_policy[ETHTOOL_A_RINGS_MAX + 1] = {
+ [ETHTOOL_A_RINGS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_RINGS_RX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_MINI_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_JUMBO_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_TX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX] = { .type = NLA_U32 },
+ [ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_U32 },
+ [ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 },
+ [ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 },
+};
+
+int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ETHTOOL_A_RINGS_MAX + 1];
+ struct ethtool_ringparam ringparam = {};
+ struct ethnl_req_info req_info = {};
+ const struct nlattr *err_attr;
+ const struct ethtool_ops *ops;
+ struct net_device *dev;
+ bool mod = false;
+ int ret;
+
+ ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
+ ETHTOOL_A_RINGS_MAX, rings_set_policy,
+ info->extack);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_RINGS_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+ ops = dev->ethtool_ops;
+ ret = -EOPNOTSUPP;
+ if (!ops->get_ringparam || !ops->set_ringparam)
+ goto out_dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+ ops->get_ringparam(dev, &ringparam);
+
+ ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
+ ethnl_update_u32(&ringparam.rx_mini_pending,
+ tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
+ ethnl_update_u32(&ringparam.rx_jumbo_pending,
+ tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
+ ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
+ ret = 0;
+ if (!mod)
+ goto out_ops;
+
+ /* ensure new ring parameters are within limits */
+ if (ringparam.rx_pending > ringparam.rx_max_pending)
+ err_attr = tb[ETHTOOL_A_RINGS_RX];
+ else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
+ err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
+ else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
+ err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
+ else if (ringparam.tx_pending > ringparam.tx_max_pending)
+ err_attr = tb[ETHTOOL_A_RINGS_TX];
+ else
+ err_attr = NULL;
+ if (err_attr) {
+ ret = -EINVAL;
+ NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
+ "requested ring size exceeds maximum");
+ goto out_ops;
+ }
+
+ ret = dev->ethtool_ops->set_ringparam(dev, &ringparam);
+ if (ret < 0)
+ goto out_ops;
+ ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
+
+out_ops:
+ ethnl_ops_complete(dev);
+out_rtnl:
+ rtnl_unlock();
+out_dev:
+ dev_put(dev);
+ return ret;
+}
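
ethnl_set_rings() applies each optional attribute through ethnl_update_u32() and only calls into the driver when something actually changed. A hedged standalone analogue of that update-and-track pattern; struct attr below stands in for a parsed netlink attribute and is not a kernel type:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct attr {
	bool present;		/* was the attribute supplied? */
	uint32_t value;
};

/* Apply an optional attribute to *dst and record whether it changed. */
static void update_u32(uint32_t *dst, const struct attr *a, bool *mod)
{
	if (!a->present || *dst == a->value)
		return;
	*dst = a->value;
	*mod = true;
}

int main(void)
{
	uint32_t rx_pending = 256;
	struct attr req = { .present = true, .value = 512 };
	bool mod = false;

	update_u32(&rx_pending, &req, &mod);
	printf("rx_pending=%u mod=%d\n", rx_pending, mod);	/* 512, 1 */
	return 0;
}
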
diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c
index e1b8a65b64c4..1d2bcabee554 100644
--- a/net/ethtool/wol.c
+++ b/net/ethtool/wol.c
@@ -123,8 +123,9 @@ int ethnl_set_wol(struct sk_buff *skb, struct genl_info *info)
wol_set_policy, info->extack);
if (ret < 0)
return ret;
- ret = ethnl_parse_header(&req_info, tb[ETHTOOL_A_WOL_HEADER],
- genl_info_net(info), info->extack, true);
+ ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_WOL_HEADER],
+ genl_info_net(info), info->extack,
+ true);
if (ret < 0)
return ret;
dev = req_info.dev;
diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
index d5f709b940ff..9787ef11ca71 100644
--- a/net/hsr/hsr_debugfs.c
+++ b/net/hsr/hsr_debugfs.c
@@ -113,7 +113,6 @@ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
priv->node_tbl_root = NULL;
return;
}
- priv->node_tbl_file = de;
}
/* hsr_debugfs_term - Tear down debugfs infrastructure
@@ -125,9 +124,7 @@ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
void
hsr_debugfs_term(struct hsr_priv *priv)
{
- debugfs_remove(priv->node_tbl_file);
- priv->node_tbl_file = NULL;
- debugfs_remove(priv->node_tbl_root);
+ debugfs_remove_recursive(priv->node_tbl_root);
priv->node_tbl_root = NULL;
}
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index c7bd6c49fadf..fc7027314ad8 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -57,24 +57,19 @@ static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
static bool hsr_check_carrier(struct hsr_port *master)
{
struct hsr_port *port;
- bool has_carrier;
- has_carrier = false;
+ ASSERT_RTNL();
- rcu_read_lock();
- hsr_for_each_port(master->hsr, port)
+ hsr_for_each_port(master->hsr, port) {
if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
- has_carrier = true;
- break;
+ netif_carrier_on(master->dev);
+ return true;
}
- rcu_read_unlock();
+ }
- if (has_carrier)
- netif_carrier_on(master->dev);
- else
- netif_carrier_off(master->dev);
+ netif_carrier_off(master->dev);
- return has_carrier;
+ return false;
}
static void hsr_check_announce(struct net_device *hsr_dev,
@@ -118,11 +113,9 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
struct hsr_port *port;
mtu_max = ETH_DATA_LEN;
- rcu_read_lock();
hsr_for_each_port(hsr, port)
if (port->type != HSR_PT_MASTER)
mtu_max = min(port->dev->mtu, mtu_max);
- rcu_read_unlock();
if (mtu_max < HSR_HLEN)
return 0;
@@ -157,7 +150,6 @@ static int hsr_dev_open(struct net_device *dev)
hsr = netdev_priv(dev);
designation = '\0';
- rcu_read_lock();
hsr_for_each_port(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
@@ -175,7 +167,6 @@ static int hsr_dev_open(struct net_device *dev)
netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
designation, port->dev->name);
}
- rcu_read_unlock();
if (designation == '\0')
netdev_warn(dev, "No slave devices configured\n");
@@ -350,22 +341,33 @@ static void hsr_announce(struct timer_list *t)
rcu_read_unlock();
}
+static void hsr_del_ports(struct hsr_priv *hsr)
+{
+ struct hsr_port *port;
+
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+ if (port)
+ hsr_del_port(port);
+
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+ if (port)
+ hsr_del_port(port);
+
+ port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ if (port)
+ hsr_del_port(port);
+}
+
/* This has to be called after all the readers are gone.
* Otherwise we would have to check the return value of
* hsr_port_get_hsr().
*/
static void hsr_dev_destroy(struct net_device *hsr_dev)
{
- struct hsr_priv *hsr;
- struct hsr_port *port;
- struct hsr_port *tmp;
-
- hsr = netdev_priv(hsr_dev);
+ struct hsr_priv *hsr = netdev_priv(hsr_dev);
hsr_debugfs_term(hsr);
-
- list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
- hsr_del_port(port);
+ hsr_del_ports(hsr);
del_timer_sync(&hsr->prune_timer);
del_timer_sync(&hsr->announce_timer);
@@ -431,11 +433,10 @@ static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
};
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
- unsigned char multicast_spec, u8 protocol_version)
+ unsigned char multicast_spec, u8 protocol_version,
+ struct netlink_ext_ack *extack)
{
struct hsr_priv *hsr;
- struct hsr_port *port;
- struct hsr_port *tmp;
int res;
hsr = netdev_priv(hsr_dev);
@@ -478,7 +479,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
/* Make sure the 1st call to netif_carrier_on() gets through */
netif_carrier_off(hsr_dev);
- res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
+ res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER, extack);
if (res)
goto err_add_master;
@@ -486,11 +487,11 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
if (res)
goto err_unregister;
- res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
+ res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
if (res)
goto err_add_slaves;
- res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
+ res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
if (res)
goto err_add_slaves;
@@ -502,8 +503,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
err_add_slaves:
unregister_netdevice(hsr_dev);
err_unregister:
- list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
- hsr_del_port(port);
+ hsr_del_ports(hsr);
err_add_master:
hsr_del_self_node(hsr);
diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h
index 6d7759c4f5f9..a099d7de7e79 100644
--- a/net/hsr/hsr_device.h
+++ b/net/hsr/hsr_device.h
@@ -13,7 +13,8 @@
void hsr_dev_setup(struct net_device *dev);
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
- unsigned char multicast_spec, u8 protocol_version);
+ unsigned char multicast_spec, u8 protocol_version,
+ struct netlink_ext_ack *extack);
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
bool is_hsr_master(struct net_device *dev);
int hsr_get_max_mtu(struct hsr_priv *hsr);
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 3ba7f61be107..d46d22c7105c 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -318,7 +318,8 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
node_dst = find_node_by_addr_A(&port->hsr->node_db,
eth_hdr(skb)->h_dest);
if (!node_dst) {
- WARN_ONCE(1, "%s: Unknown node\n", __func__);
+ if (net_ratelimit())
+ netdev_err(skb->dev, "%s: Unknown node\n", __func__);
return;
}
if (port->type != node_dst->addr_B_port)
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index 9e389accbfc7..26d6c39f24e1 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -85,7 +85,8 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
master->dev->mtu = mtu_max;
break;
case NETDEV_UNREGISTER:
- hsr_del_port(port);
+ if (!is_hsr_master(dev))
+ hsr_del_port(port);
break;
case NETDEV_PRE_TYPE_CHANGE:
/* HSR works only on Ethernet devices. Refuse slave to change
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index 754d84b217f0..7321cf8d6d2c 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -166,7 +166,6 @@ struct hsr_priv {
unsigned char sup_multicast_addr[ETH_ALEN];
#ifdef CONFIG_DEBUG_FS
struct dentry *node_tbl_root;
- struct dentry *node_tbl_file;
#endif
};
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 8dc0547f01d0..64d39c1e93a2 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -35,26 +35,34 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
unsigned char multicast_spec, hsr_version;
if (!data) {
- netdev_info(dev, "HSR: No slave devices specified\n");
+ NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
return -EINVAL;
}
if (!data[IFLA_HSR_SLAVE1]) {
- netdev_info(dev, "HSR: Slave1 device not specified\n");
+ NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
return -EINVAL;
}
link[0] = __dev_get_by_index(src_net,
nla_get_u32(data[IFLA_HSR_SLAVE1]));
+ if (!link[0]) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
+ return -EINVAL;
+ }
if (!data[IFLA_HSR_SLAVE2]) {
- netdev_info(dev, "HSR: Slave2 device not specified\n");
+ NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
return -EINVAL;
}
link[1] = __dev_get_by_index(src_net,
nla_get_u32(data[IFLA_HSR_SLAVE2]));
+ if (!link[1]) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
+ return -EINVAL;
+ }
- if (!link[0] || !link[1])
- return -ENODEV;
- if (link[0] == link[1])
+ if (link[0] == link[1]) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
return -EINVAL;
+ }
if (!data[IFLA_HSR_MULTICAST_SPEC])
multicast_spec = 0;
@@ -66,34 +74,25 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
else
hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
- return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
+ return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
}
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
- struct hsr_priv *hsr;
+ struct hsr_priv *hsr = netdev_priv(dev);
struct hsr_port *port;
- int res;
-
- hsr = netdev_priv(dev);
-
- res = 0;
- rcu_read_lock();
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
- if (port)
- res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
- rcu_read_unlock();
- if (res)
- goto nla_put_failure;
+ if (port) {
+ if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
+ goto nla_put_failure;
+ }
- rcu_read_lock();
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
- if (port)
- res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
- rcu_read_unlock();
- if (res)
- goto nla_put_failure;
+ if (port) {
+ if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
+ goto nla_put_failure;
+ }
if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
hsr->sup_multicast_addr) ||
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index fbfd0db182b7..d3547e8c6d5b 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -25,7 +25,6 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_PASS;
}
- rcu_read_lock(); /* hsr->node_db, hsr->ports */
port = hsr_port_get_rcu(skb->dev);
if (!port)
goto finish_pass;
@@ -45,11 +44,9 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
hsr_forward_skb(skb, port);
finish_consume:
- rcu_read_unlock(); /* hsr->node_db, hsr->ports */
return RX_HANDLER_CONSUMED;
finish_pass:
- rcu_read_unlock(); /* hsr->node_db, hsr->ports */
return RX_HANDLER_PASS;
}
@@ -58,33 +55,37 @@ bool hsr_port_exists(const struct net_device *dev)
return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
}
-static int hsr_check_dev_ok(struct net_device *dev)
+static int hsr_check_dev_ok(struct net_device *dev,
+ struct netlink_ext_ack *extack)
{
/* Don't allow HSR on non-ethernet like devices */
if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
dev->addr_len != ETH_ALEN) {
- netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave.");
return -EINVAL;
}
/* Don't allow enslaving hsr devices */
if (is_hsr_master(dev)) {
- netdev_info(dev, "Cannot create trees of HSR devices.\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot create trees of HSR devices.");
return -EINVAL;
}
if (hsr_port_exists(dev)) {
- netdev_info(dev, "This device is already a HSR slave.\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "This device is already a HSR slave.");
return -EINVAL;
}
if (is_vlan_dev(dev)) {
- netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
+ NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver.");
return -EINVAL;
}
if (dev->priv_flags & IFF_DONT_BRIDGE) {
- netdev_info(dev, "This device does not support bridging.\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "This device does not support bridging.");
return -EOPNOTSUPP;
}
@@ -96,19 +97,25 @@ static int hsr_check_dev_ok(struct net_device *dev)
}
/* Setup device to be added to the HSR bridge. */
-static int hsr_portdev_setup(struct net_device *dev, struct hsr_port *port)
+static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
+ struct hsr_port *port,
+ struct netlink_ext_ack *extack)
+
{
+ struct net_device *hsr_dev;
+ struct hsr_port *master;
int res;
- dev_hold(dev);
res = dev_set_promiscuity(dev, 1);
if (res)
- goto fail_promiscuity;
+ return res;
- /* FIXME:
- * What does net device "adjacency" mean? Should we do
- * res = netdev_master_upper_dev_link(port->dev, port->hsr->dev); ?
- */
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ hsr_dev = master->dev;
+
+ res = netdev_upper_dev_link(dev, hsr_dev, extack);
+ if (res)
+ goto fail_upper_dev_link;
res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
if (res)
@@ -118,21 +125,20 @@ static int hsr_portdev_setup(struct net_device *dev, struct hsr_port *port)
return 0;
fail_rx_handler:
+ netdev_upper_dev_unlink(dev, hsr_dev);
+fail_upper_dev_link:
dev_set_promiscuity(dev, -1);
-fail_promiscuity:
- dev_put(dev);
-
return res;
}
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
- enum hsr_port_type type)
+ enum hsr_port_type type, struct netlink_ext_ack *extack)
{
struct hsr_port *port, *master;
int res;
if (type != HSR_PT_MASTER) {
- res = hsr_check_dev_ok(dev);
+ res = hsr_check_dev_ok(dev, extack);
if (res)
return res;
}
@@ -146,7 +152,7 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
return -ENOMEM;
if (type != HSR_PT_MASTER) {
- res = hsr_portdev_setup(dev, port);
+ res = hsr_portdev_setup(hsr, dev, port, extack);
if (res)
goto fail_dev_setup;
}
@@ -179,21 +185,14 @@ void hsr_del_port(struct hsr_port *port)
list_del_rcu(&port->port_list);
if (port != master) {
- if (master) {
- netdev_update_features(master->dev);
- dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
- }
+ netdev_update_features(master->dev);
+ dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
netdev_rx_handler_unregister(port->dev);
dev_set_promiscuity(port->dev, -1);
+ netdev_upper_dev_unlink(port->dev, master->dev);
}
- /* FIXME?
- * netdev_upper_dev_unlink(port->dev, port->hsr->dev);
- */
-
synchronize_rcu();
- if (port != master)
- dev_put(port->dev);
kfree(port);
}
diff --git a/net/hsr/hsr_slave.h b/net/hsr/hsr_slave.h
index 64b549529592..8953ea279ce9 100644
--- a/net/hsr/hsr_slave.h
+++ b/net/hsr/hsr_slave.h
@@ -13,7 +13,7 @@
#include "hsr_main.h"
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
- enum hsr_port_type pt);
+ enum hsr_port_type pt, struct netlink_ext_ack *extack);
void hsr_del_port(struct hsr_port *port);
bool hsr_port_exists(const struct net_device *dev);
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index 2c7a38d76a3a..0672b2f01586 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -21,7 +21,13 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
[IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_BCN_ORD] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_SF_ORD] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_PAN_COORD] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_BAT_EXT] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_COORD_REALIGN] = { .type = NLA_U8, },
[IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_DEV_TYPE] = { .type = NLA_U8, },
[IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
[IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 2fe295432c24..bd7b4e92e07f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -872,7 +872,7 @@ int inet_shutdown(struct socket *sock, int how)
err = -ENOTCONN;
/* Hack to wake up other listeners, who can poll for
EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
- /* fall through */
+ fallthrough;
default:
sk->sk_shutdown |= how;
if (sk->sk_prot->shutdown)
@@ -886,7 +886,7 @@ int inet_shutdown(struct socket *sock, int how)
case TCP_LISTEN:
if (!(how & RCV_SHUTDOWN))
break;
- /* fall through */
+ fallthrough;
case TCP_SYN_SENT:
err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
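
This hunk and the ones that follow replace /* fall through */ comments with the fallthrough pseudo-keyword so that -Wimplicit-fallthrough can verify the intent. A small userspace sketch of the same idea; the fallback macro definition here is an assumption for illustration, not the kernel's exact one:

#include <stdio.h>

#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough	__attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough	do {} while (0)	/* no-op fallback */
#endif

static void shutdown_sides(int how)
{
	switch (how) {
	case 1:
		puts("shutting down receive side");
		fallthrough;	/* intentional: also shut down send side */
	case 2:
		puts("shutting down send side");
		break;
	default:
		puts("invalid");
	}
}

int main(void)
{
	shutdown_sides(1);
	return 0;
}
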
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 974179b3b314..d99e1be94019 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -107,7 +107,7 @@ static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
if (optlen < 6)
return -EINVAL;
memcpy(daddr, optptr+optlen-4, 4);
- /* Fall through */
+ fallthrough;
default:
memset(optptr, 0, optlen);
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 05eb42f347e8..687971d83b4e 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1181,7 +1181,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
case SIOCSARP:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- /* fall through */
+ fallthrough;
case SIOCGARP:
err = copy_from_user(&r, arg, sizeof(struct arpreq));
if (err)
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e4632bd2026d..30fa42f5997d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1566,11 +1566,11 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
}
}
ip_mc_up(in_dev);
- /* fall through */
+ fallthrough;
case NETDEV_CHANGEADDR:
if (!IN_DEV_ARP_NOTIFY(in_dev))
break;
- /* fall through */
+ fallthrough;
case NETDEV_NOTIFY_PEERS:
/* Send gratuitous ARP to notify of link change */
inetdev_send_gratuitous_arp(dev, in_dev);
@@ -1588,7 +1588,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
if (inetdev_valid_mtu(dev->mtu))
break;
/* disable IP when MTU is not enough */
- /* fall through */
+ fallthrough;
case NETDEV_UNREGISTER:
inetdev_destroy(in_dev);
break;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index a803cdd9400a..e4c62b8f57a8 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1962,7 +1962,7 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
nexthop_nh->fib_nh_flags |= RTNH_F_DEAD;
- /* fall through */
+ fallthrough;
case NETDEV_CHANGE:
nexthop_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
break;
@@ -1984,7 +1984,7 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
fi->fib_flags |= RTNH_F_DEAD;
- /* fall through */
+ fallthrough;
case NETDEV_CHANGE:
fi->fib_flags |= RTNH_F_LINKDOWN;
break;
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 5fd6e8ed02b5..66fdbfe5447c 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -56,7 +56,9 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
}
EXPORT_SYMBOL_GPL(gre_del_protocol);
-/* Fills in tpi and returns header length to be pulled. */
+/* Fills in tpi and returns header length to be pulled.
+ * Note that the caller must use pskb_may_pull() before pulling the GRE header.
+ */
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, __be16 proto, int nhs)
{
@@ -110,8 +112,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
* - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
*/
if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+ u8 _val, *val;
+
+ val = skb_header_pointer(skb, nhs + hdr_len,
+ sizeof(_val), &_val);
+ if (!val)
+ return -EINVAL;
tpi->proto = proto;
- if ((*(u8 *)options & 0xF0) != 0x40)
+ if ((*val & 0xF0) != 0x40)
hdr_len += 4;
}
tpi->hdr_len = hdr_len;
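
The WCCP change above replaces a blind dereference of the options pointer with skb_header_pointer(), which fails cleanly when the requested byte lies beyond the data actually present. A hedged plain-C analogue of that bounds-checked read (it always copies, whereas the real helper returns a direct pointer when the data is linear):

#include <stdio.h>
#include <string.h>

/* Copy len bytes at offset into buf only if they lie within the packet. */
static const void *header_pointer(const unsigned char *pkt, size_t pkt_len,
				  size_t offset, size_t len, void *buf)
{
	if (len > pkt_len || offset > pkt_len - len)
		return NULL;	/* out of bounds: caller must bail out */
	memcpy(buf, pkt + offset, len);
	return buf;
}

int main(void)
{
	unsigned char pkt[8] = { 0 }, byte;

	puts(header_pointer(pkt, sizeof(pkt), 8, 1, &byte) ?
	     "in bounds" : "out of bounds");	/* out of bounds */
	return 0;
}
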
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index f369e7ce685b..fc61f51d87a3 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -865,7 +865,7 @@ static bool icmp_unreach(struct sk_buff *skb)
case 3:
if (!icmp_tag_validation(iph->protocol))
goto out;
- /* fall through */
+ fallthrough;
case 0:
info = ntohs(icmph->un.frag.mtu);
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index a4db79b1b643..5f34eb951627 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -131,7 +131,7 @@ static int inet_csk_bind_conflict(const struct sock *sk,
{
struct sock *sk2;
bool reuse = sk->sk_reuse;
- bool reuseport = !!sk->sk_reuseport && reuseport_ok;
+ bool reuseport = !!sk->sk_reuseport;
kuid_t uid = sock_i_uid((struct sock *)sk);
/*
@@ -146,17 +146,21 @@ static int inet_csk_bind_conflict(const struct sock *sk,
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
- if ((!reuse || !sk2->sk_reuse ||
- sk2->sk_state == TCP_LISTEN) &&
- (!reuseport || !sk2->sk_reuseport ||
- rcu_access_pointer(sk->sk_reuseport_cb) ||
- (sk2->sk_state != TCP_TIME_WAIT &&
- !uid_eq(uid, sock_i_uid(sk2))))) {
- if (inet_rcv_saddr_equal(sk, sk2, true))
- break;
- }
- if (!relax && reuse && sk2->sk_reuse &&
+ if (reuse && sk2->sk_reuse &&
sk2->sk_state != TCP_LISTEN) {
+ if ((!relax ||
+ (!reuseport_ok &&
+ reuseport && sk2->sk_reuseport &&
+ !rcu_access_pointer(sk->sk_reuseport_cb) &&
+ (sk2->sk_state == TCP_TIME_WAIT ||
+ uid_eq(uid, sock_i_uid(sk2))))) &&
+ inet_rcv_saddr_equal(sk, sk2, true))
+ break;
+ } else if (!reuseport_ok ||
+ !reuseport || !sk2->sk_reuseport ||
+ rcu_access_pointer(sk->sk_reuseport_cb) ||
+ (sk2->sk_state != TCP_TIME_WAIT &&
+ !uid_eq(uid, sock_i_uid(sk2)))) {
if (inet_rcv_saddr_equal(sk, sk2, true))
break;
}
@@ -176,12 +180,14 @@ inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *
int port = 0;
struct inet_bind_hashbucket *head;
struct net *net = sock_net(sk);
+ bool relax = false;
int i, low, high, attempt_half;
struct inet_bind_bucket *tb;
u32 remaining, offset;
int l3mdev;
l3mdev = inet_sk_bound_l3mdev(sk);
+ports_exhausted:
attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
inet_get_local_port_range(net, &low, &high);
@@ -219,7 +225,7 @@ other_parity_scan:
inet_bind_bucket_for_each(tb, &head->chain)
if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
tb->port == port) {
- if (!inet_csk_bind_conflict(sk, tb, false, false))
+ if (!inet_csk_bind_conflict(sk, tb, relax, false))
goto success;
goto next_port;
}
@@ -239,6 +245,12 @@ next_port:
attempt_half = 2;
goto other_half_scan;
}
+
+ if (net->ipv4.sysctl_ip_autobind_reuse && !relax) {
+ /* We still have a chance to connect to different destinations */
+ relax = true;
+ goto ports_exhausted;
+ }
return NULL;
success:
*port_ret = port;
@@ -482,8 +494,28 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
}
spin_unlock_bh(&queue->fastopenq.lock);
}
+
out:
release_sock(sk);
+ if (newsk && mem_cgroup_sockets_enabled) {
+ int amt;
+
+ /* atomically get the memory usage, set and charge the
+ * newsk->sk_memcg.
+ */
+ lock_sock(newsk);
+
+ /* The socket has not been accepted yet, no need to look at
+ * newsk->sk_wmem_queued.
+ */
+ amt = sk_mem_pages(newsk->sk_forward_alloc +
+ atomic_read(&newsk->sk_rmem_alloc));
+ mem_cgroup_sk_alloc(newsk);
+ if (newsk->sk_memcg && amt)
+ mem_cgroup_charge_skmem(newsk->sk_memcg, amt);
+
+ release_sock(newsk);
+ }
if (req)
reqsk_put(req);
return newsk;
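
The memcg charge added to inet_csk_accept() converts the accepted socket's buffered bytes into whole pages via sk_mem_pages(). A sketch of that round-up; the PAGE_SIZE value is assumed for illustration:

#include <stdio.h>

#define PAGE_SIZE	4096u	/* assumption: typical page size */

/* Round a byte count up to whole pages, as sk_mem_pages() does. */
static unsigned int mem_pages(unsigned int bytes)
{
	return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	printf("%u\n", mem_pages(5000));	/* 2 */
	return 0;
}
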
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index f11e997e517b..5d50aad3cdbf 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -23,6 +23,7 @@
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
+#include <net/bpf_sk_storage.h>
#include <net/netlink.h>
#include <linux/inet.h>
@@ -100,13 +101,9 @@ static size_t inet_sk_attr_size(struct sock *sk,
aux = handler->idiag_get_aux_size(sk, net_admin);
return nla_total_size(sizeof(struct tcp_info))
- + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
- + nla_total_size(1) /* INET_DIAG_TOS */
- + nla_total_size(1) /* INET_DIAG_TCLASS */
- + nla_total_size(4) /* INET_DIAG_MARK */
- + nla_total_size(4) /* INET_DIAG_CLASS_ID */
- + nla_total_size(sizeof(struct inet_diag_meminfo))
+ nla_total_size(sizeof(struct inet_diag_msg))
+ + inet_diag_msg_attrs_size()
+ + nla_total_size(sizeof(struct inet_diag_meminfo))
+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
+ nla_total_size(TCP_CA_NAME_MAX)
+ nla_total_size(sizeof(struct tcpvegas_info))
@@ -147,6 +144,24 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
goto errout;
+ if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
+ ext & (1 << (INET_DIAG_TCLASS - 1))) {
+ u32 classid = 0;
+
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ classid = sock_cgroup_classid(&sk->sk_cgrp_data);
+#endif
+ /* Fall back to socket priority if class id isn't set.
+ * Classful qdiscs use it as direct reference to class.
+ * For cgroup2 classid is always zero.
+ */
+ if (!classid)
+ classid = sk->sk_priority;
+
+ if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
+ goto errout;
+ }
+
r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
r->idiag_inode = sock_i_ino(sk);
@@ -156,26 +171,28 @@ errout:
}
EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);
+#define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
- struct sk_buff *skb, const struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh,
- bool net_admin)
+ struct sk_buff *skb, struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *req,
+ u16 nlmsg_flags, bool net_admin)
{
const struct tcp_congestion_ops *ca_ops;
const struct inet_diag_handler *handler;
+ struct inet_diag_dump_data *cb_data;
int ext = req->idiag_ext;
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
struct nlattr *attr;
void *info = NULL;
+ cb_data = cb->data;
handler = inet_diag_table[req->sdiag_protocol];
BUG_ON(!handler);
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
- nlmsg_flags);
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
if (!nlh)
return -EMSGSIZE;
@@ -187,7 +204,9 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->idiag_timer = 0;
r->idiag_retrans = 0;
- if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
+ if (inet_diag_msg_attrs_fill(sk, skb, r, ext,
+ sk_user_ns(NETLINK_CB(cb->skb).sk),
+ net_admin))
goto errout;
if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
@@ -284,22 +303,46 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
goto errout;
}
- if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
- ext & (1 << (INET_DIAG_TCLASS - 1))) {
- u32 classid = 0;
-
-#ifdef CONFIG_SOCK_CGROUP_DATA
- classid = sock_cgroup_classid(&sk->sk_cgrp_data);
-#endif
- /* Fallback to socket priority if class id isn't set.
- * Classful qdiscs use it as direct reference to class.
- * For cgroup2 classid is always zero.
- */
- if (!classid)
- classid = sk->sk_priority;
+ /* Keep it at the end for potential retry with a larger skb,
+ * or else do best-effort fitting, which is only done for the
+ * first_nlmsg.
+ */
+ if (cb_data->bpf_stg_diag) {
+ bool first_nlmsg = ((unsigned char *)nlh == skb->data);
+ unsigned int prev_min_dump_alloc;
+ unsigned int total_nla_size = 0;
+ unsigned int msg_len;
+ int err;
+
+ msg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
+ err = bpf_sk_storage_diag_put(cb_data->bpf_stg_diag, sk, skb,
+ INET_DIAG_SK_BPF_STORAGES,
+ &total_nla_size);
+
+ if (!err)
+ goto out;
+
+ total_nla_size += msg_len;
+ prev_min_dump_alloc = cb->min_dump_alloc;
+ if (total_nla_size > prev_min_dump_alloc)
+ cb->min_dump_alloc = min_t(u32, total_nla_size,
+ MAX_DUMP_ALLOC_SIZE);
+
+ if (!first_nlmsg)
+ goto errout;
- if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
+ if (cb->min_dump_alloc > prev_min_dump_alloc)
+ /* Retry with pskb_expand_head() with
+ * __GFP_DIRECT_RECLAIM
+ */
goto errout;
+
+ WARN_ON_ONCE(total_nla_size <= prev_min_dump_alloc);
+
+ /* Send what we have for this sk
+ * and move on to the next sk in the following
+ * dump()
+ */
}
out:
@@ -312,30 +355,19 @@ errout:
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
-static int inet_csk_diag_fill(struct sock *sk,
- struct sk_buff *skb,
- const struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh,
- bool net_admin)
-{
- return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
- portid, seq, nlmsg_flags, unlh, net_admin);
-}
-
static int inet_twsk_diag_fill(struct sock *sk,
struct sk_buff *skb,
- u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ struct netlink_callback *cb,
+ u16 nlmsg_flags)
{
struct inet_timewait_sock *tw = inet_twsk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
long tmo;
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
- nlmsg_flags);
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type,
+ sizeof(*r), nlmsg_flags);
if (!nlh)
return -EMSGSIZE;
@@ -359,16 +391,16 @@ static int inet_twsk_diag_fill(struct sock *sk,
}
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
- u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh, bool net_admin)
+ struct netlink_callback *cb,
+ u16 nlmsg_flags, bool net_admin)
{
struct request_sock *reqsk = inet_reqsk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
long tmo;
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
- nlmsg_flags);
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
if (!nlh)
return -EMSGSIZE;
@@ -397,21 +429,18 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
+ struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
- struct user_namespace *user_ns,
- u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh, bool net_admin)
+ u16 nlmsg_flags, bool net_admin)
{
if (sk->sk_state == TCP_TIME_WAIT)
- return inet_twsk_diag_fill(sk, skb, portid, seq,
- nlmsg_flags, unlh);
+ return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags);
if (sk->sk_state == TCP_NEW_SYN_RECV)
- return inet_req_diag_fill(sk, skb, portid, seq,
- nlmsg_flags, unlh, net_admin);
+ return inet_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
- return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
- nlmsg_flags, unlh, net_admin);
+ return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags,
+ net_admin);
}
struct sock *inet_diag_find_one_icsk(struct net *net,
@@ -459,10 +488,10 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+ struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
+ struct sk_buff *in_skb = cb->skb;
bool net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN);
struct net *net = sock_net(in_skb->sk);
struct sk_buff *rep;
@@ -479,10 +508,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
goto out;
}
- err = sk_diag_fill(sk, rep, req,
- sk_user_ns(NETLINK_CB(in_skb).sk),
- NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh, net_admin);
+ err = sk_diag_fill(sk, rep, cb, req, 0, net_admin);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
nlmsg_free(rep);
@@ -509,14 +535,21 @@ static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
int err;
handler = inet_diag_lock_handler(req->sdiag_protocol);
- if (IS_ERR(handler))
+ if (IS_ERR(handler)) {
err = PTR_ERR(handler);
- else if (cmd == SOCK_DIAG_BY_FAMILY)
- err = handler->dump_one(in_skb, nlh, req);
- else if (cmd == SOCK_DESTROY && handler->destroy)
+ } else if (cmd == SOCK_DIAG_BY_FAMILY) {
+ struct inet_diag_dump_data empty_dump_data = {};
+ struct netlink_callback cb = {
+ .nlh = nlh,
+ .skb = in_skb,
+ .data = &empty_dump_data,
+ };
+ err = handler->dump_one(&cb, req);
+ } else if (cmd == SOCK_DESTROY && handler->destroy) {
err = handler->destroy(in_skb, req);
- else
+ } else {
err = -EOPNOTSUPP;
+ }
inet_diag_unlock_handler(handler);
return err;
@@ -847,23 +880,6 @@ static int inet_diag_bc_audit(const struct nlattr *attr,
return len == 0 ? 0 : -EINVAL;
}
-static int inet_csk_diag_dump(struct sock *sk,
- struct sk_buff *skb,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- const struct nlattr *bc,
- bool net_admin)
-{
- if (!inet_diag_bc_sk(bc, sk))
- return 0;
-
- return inet_csk_diag_fill(sk, skb, r,
- sk_user_ns(NETLINK_CB(cb->skb).sk),
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh,
- net_admin);
-}
-
static void twsk_build_assert(void)
{
BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
@@ -892,14 +908,17 @@ static void twsk_build_assert(void)
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
+ struct inet_diag_dump_data *cb_data = cb->data;
struct net *net = sock_net(skb->sk);
u32 idiag_states = r->idiag_states;
int i, num, s_i, s_num;
+ struct nlattr *bc;
struct sock *sk;
+ bc = cb_data->inet_diag_nla_bc;
if (idiag_states & TCPF_SYN_RECV)
idiag_states |= TCPF_NEW_SYN_RECV;
s_i = cb->args[1];
@@ -935,8 +954,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
r->id.idiag_sport)
goto next_listen;
- if (inet_csk_diag_dump(sk, skb, cb, r,
- bc, net_admin) < 0) {
+ if (!inet_diag_bc_sk(bc, sk))
+ goto next_listen;
+
+ if (inet_sk_diag_fill(sk, inet_csk(sk), skb,
+ cb, r, NLM_F_MULTI,
+ net_admin) < 0) {
spin_unlock(&ilb->lock);
goto done;
}
@@ -1014,11 +1037,8 @@ next_normal:
res = 0;
for (idx = 0; idx < accum; idx++) {
if (res >= 0) {
- res = sk_diag_fill(sk_arr[idx], skb, r,
- sk_user_ns(NETLINK_CB(cb->skb).sk),
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- cb->nlh, net_admin);
+ res = sk_diag_fill(sk_arr[idx], skb, cb, r,
+ NLM_F_MULTI, net_admin);
if (res < 0)
num = num_arr[idx];
}
@@ -1042,31 +1062,101 @@ out:
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
const struct inet_diag_handler *handler;
+ u32 prev_min_dump_alloc;
int err = 0;
+again:
+ prev_min_dump_alloc = cb->min_dump_alloc;
handler = inet_diag_lock_handler(r->sdiag_protocol);
if (!IS_ERR(handler))
- handler->dump(skb, cb, r, bc);
+ handler->dump(skb, cb, r);
else
err = PTR_ERR(handler);
inet_diag_unlock_handler(handler);
+ /* The skb is not large enough to fit one sk info and
+ * inet_sk_diag_fill() has requested a larger skb.
+ */
+ if (!skb->len && cb->min_dump_alloc > prev_min_dump_alloc) {
+ err = pskb_expand_head(skb, 0, cb->min_dump_alloc, GFP_KERNEL);
+ if (!err)
+ goto again;
+ }
+
return err ? : skb->len;
}
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
- int hdrlen = sizeof(struct inet_diag_req_v2);
- struct nlattr *bc = NULL;
+ return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh));
+}
+
+static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen)
+{
+ const struct nlmsghdr *nlh = cb->nlh;
+ struct inet_diag_dump_data *cb_data;
+ struct sk_buff *skb = cb->skb;
+ struct nlattr *nla;
+ int rem, err;
+
+ cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data)
+ return -ENOMEM;
+
+ nla_for_each_attr(nla, nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), rem) {
+ int type = nla_type(nla);
- if (nlmsg_attrlen(cb->nlh, hdrlen))
- bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+ if (type < __INET_DIAG_REQ_MAX)
+ cb_data->req_nlas[type] = nla;
+ }
+
+ nla = cb_data->inet_diag_nla_bc;
+ if (nla) {
+ err = inet_diag_bc_audit(nla, skb);
+ if (err) {
+ kfree(cb_data);
+ return err;
+ }
+ }
+
+ nla = cb_data->inet_diag_nla_bpf_stgs;
+ if (nla) {
+ struct bpf_sk_storage_diag *bpf_stg_diag;
- return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
+ bpf_stg_diag = bpf_sk_storage_diag_alloc(nla);
+ if (IS_ERR(bpf_stg_diag)) {
+ kfree(cb_data);
+ return PTR_ERR(bpf_stg_diag);
+ }
+ cb_data->bpf_stg_diag = bpf_stg_diag;
+ }
+
+ cb->data = cb_data;
+ return 0;
+}
+
+static int inet_diag_dump_start(struct netlink_callback *cb)
+{
+ return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req_v2));
+}
+
+static int inet_diag_dump_start_compat(struct netlink_callback *cb)
+{
+ return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req));
+}
+
+static int inet_diag_dump_done(struct netlink_callback *cb)
+{
+ struct inet_diag_dump_data *cb_data = cb->data;
+
+ bpf_sk_storage_diag_free(cb_data->bpf_stg_diag);
+ kfree(cb->data);
+
+ return 0;
}
static int inet_diag_type2proto(int type)
@@ -1085,9 +1175,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct inet_diag_req *rc = nlmsg_data(cb->nlh);
- int hdrlen = sizeof(struct inet_diag_req);
struct inet_diag_req_v2 req;
- struct nlattr *bc = NULL;
req.sdiag_family = AF_UNSPEC; /* compatibility */
req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
@@ -1095,10 +1183,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
req.idiag_states = rc->idiag_states;
req.id = rc->id;
- if (nlmsg_attrlen(cb->nlh, hdrlen))
- bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
-
- return __inet_diag_dump(skb, cb, &req, bc);
+ return __inet_diag_dump(skb, cb, &req);
}
static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
@@ -1126,22 +1211,12 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
- if (nlmsg_attrlen(nlh, hdrlen)) {
- struct nlattr *attr;
- int err;
-
- attr = nlmsg_find_attr(nlh, hdrlen,
- INET_DIAG_REQ_BYTECODE);
- err = inet_diag_bc_audit(attr, skb);
- if (err)
- return err;
- }
- {
- struct netlink_dump_control c = {
- .dump = inet_diag_dump_compat,
- };
- return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
- }
+ struct netlink_dump_control c = {
+ .start = inet_diag_dump_start_compat,
+ .done = inet_diag_dump_done,
+ .dump = inet_diag_dump_compat,
+ };
+ return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
}
return inet_diag_get_exact_compat(skb, nlh);
@@ -1157,22 +1232,12 @@ static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
h->nlmsg_flags & NLM_F_DUMP) {
- if (nlmsg_attrlen(h, hdrlen)) {
- struct nlattr *attr;
- int err;
-
- attr = nlmsg_find_attr(h, hdrlen,
- INET_DIAG_REQ_BYTECODE);
- err = inet_diag_bc_audit(attr, skb);
- if (err)
- return err;
- }
- {
- struct netlink_dump_control c = {
- .dump = inet_diag_dump,
- };
- return netlink_dump_start(net->diag_nlsk, skb, h, &c);
- }
+ struct netlink_dump_control c = {
+ .start = inet_diag_dump_start,
+ .done = inet_diag_dump_done,
+ .dump = inet_diag_dump,
+ };
+ return netlink_dump_start(net->diag_nlsk, skb, h, &c);
}
return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h));
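
The inet_diag refactor above replaces per-pass bytecode lookup with dump-wide state parsed once: netlink_dump_control's .start hook allocates cb->data before the first .dump call, and .done frees it when the dump ends (normally or on abort). A minimal sketch of that lifecycle, using hypothetical my_* names that are not part of this patch:

    struct my_dump_state {
            struct nlattr *filter;          /* parsed once in .start */
    };

    static int my_dump_start(struct netlink_callback *cb)
    {
            struct my_dump_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

            if (!st)
                    return -ENOMEM;
            cb->data = st;                  /* visible to every .dump pass */
            return 0;
    }

    static int my_dump_done(struct netlink_callback *cb)
    {
            kfree(cb->data);                /* runs once, even on early abort */
            return 0;
    }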
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d84819893db9..aaaaf907e0d8 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -333,7 +333,7 @@ static int ip_mc_finish_output(struct net *net, struct sock *sk,
switch (ret) {
case NET_XMIT_CN:
do_cn = true;
- /* fall through */
+ fallthrough;
case NET_XMIT_SUCCESS:
break;
default:
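
The /* fall through */ conversions in this and the following files swap a comment convention that only static checkers understood for the fallthrough pseudo-keyword, which on supporting compilers expands to __attribute__((__fallthrough__)) (see include/linux/compiler_attributes.h), so -Wimplicit-fallthrough can verify intent. The idiom, sketched on the hunk above:

    switch (ret) {
    case NET_XMIT_CN:
            do_cn = true;
            fallthrough;    /* explicit: CN also takes the success path */
    case NET_XMIT_SUCCESS:
            break;
    default:
            return ret;
    }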
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 6e68def66822..9cf83cc85e4a 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1465,7 +1465,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
case MRT_ADD_MFC:
case MRT_DEL_MFC:
parent = -1;
- /* fall through */
+ fallthrough;
case MRT_ADD_MFC_PROXY:
case MRT_DEL_MFC_PROXY:
if (optlen != sizeof(mfc)) {
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
index 4b2d49cc9f1a..0c72156130b6 100644
--- a/net/ipv4/netfilter/nf_log_ipv4.c
+++ b/net/ipv4/netfilter/nf_log_ipv4.c
@@ -173,7 +173,7 @@ static void dump_ipv4_packet(struct net *net, struct nf_log_buf *m,
case ICMP_REDIRECT:
/* Max length: 24 "GATEWAY=255.255.255.255 " */
nf_log_buf_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
- /* Fall through */
+ fallthrough;
case ICMP_DEST_UNREACH:
case ICMP_SOURCE_QUENCH:
case ICMP_TIME_EXCEEDED:
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index b2aeb7bf5dac..3c25a467b3ef 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -168,7 +168,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
pptp_msg_name[0]);
- /* fall through */
+ fallthrough;
case PPTP_SET_LINK_INFO:
/* only need to NAT in case PAC is behind NAT box */
case PPTP_START_SESSION_REQUEST:
@@ -271,7 +271,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
pr_debug("unknown inbound packet %s\n",
msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
pptp_msg_name[0]);
- /* fall through */
+ fallthrough;
case PPTP_START_SESSION_REQUEST:
case PPTP_START_SESSION_REPLY:
case PPTP_STOP_SESSION_REQUEST:
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index d072c326dd64..fdfca534d094 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -1327,7 +1327,7 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
case AF_UNSPEC:
if (tb[NHA_GROUP])
break;
- /* fallthrough */
+ fallthrough;
default:
NL_SET_ERR_MSG(extack, "Invalid address family");
goto out;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 3183413ebc6c..47665919048f 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -1034,6 +1034,7 @@ static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
}
void *raw_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(&h->lock)
{
struct raw_hashinfo *h = PDE_DATA(file_inode(seq->file));
@@ -1056,6 +1057,7 @@ void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
EXPORT_SYMBOL_GPL(raw_seq_next);
void raw_seq_stop(struct seq_file *seq, void *v)
+ __releases(&h->lock)
{
struct raw_hashinfo *h = PDE_DATA(file_inode(seq->file));
diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
index e35736b99300..1b5b8af27aaf 100644
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -87,31 +87,30 @@ out_unlock:
return sk ? sk : ERR_PTR(-ENOENT);
}
-static int raw_diag_dump_one(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+static int raw_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *r)
{
- struct net *net = sock_net(in_skb->sk);
+ struct sk_buff *in_skb = cb->skb;
struct sk_buff *rep;
struct sock *sk;
+ struct net *net;
int err;
+ net = sock_net(in_skb->sk);
sk = raw_sock_get(net, r);
if (IS_ERR(sk))
return PTR_ERR(sk);
- rep = nlmsg_new(sizeof(struct inet_diag_msg) +
- sizeof(struct inet_diag_meminfo) + 64,
+ rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
+ inet_diag_msg_attrs_size() +
+ nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
GFP_KERNEL);
if (!rep) {
sock_put(sk);
return -ENOMEM;
}
- err = inet_sk_diag_fill(sk, NULL, rep, r,
- sk_user_ns(NETLINK_CB(in_skb).sk),
- NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh,
+ err = inet_sk_diag_fill(sk, NULL, rep, cb, r, 0,
netlink_net_capable(in_skb, CAP_NET_ADMIN));
sock_put(sk);
@@ -136,25 +135,25 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
if (!inet_diag_bc_sk(bc, sk))
return 0;
- return inet_sk_diag_fill(sk, NULL, skb, r,
- sk_user_ns(NETLINK_CB(cb->skb).sk),
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- cb->nlh, net_admin);
+ return inet_sk_diag_fill(sk, NULL, skb, cb, r, NLM_F_MULTI, net_admin);
}
static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
struct net *net = sock_net(skb->sk);
+ struct inet_diag_dump_data *cb_data;
int num, s_num, slot, s_slot;
struct sock *sk = NULL;
+ struct nlattr *bc;
if (IS_ERR(hashinfo))
return;
+ cb_data = cb->data;
+ bc = cb_data->inet_diag_nla_bc;
s_slot = cb->args[0];
num = s_num = cb->args[1];
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d9531b4b33f2..81b267e990a1 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -764,6 +764,15 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "ip_autobind_reuse",
+ .data = &init_net.ipv4.sysctl_ip_autobind_reuse,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
.procname = "fwmark_reflect",
.data = &init_net.ipv4.sysctl_fwmark_reflect,
.maxlen = sizeof(int),
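
The new ip_autobind_reuse entry follows the standard pattern for a boolean sysctl: proc_dointvec_minmax with SYSCTL_ZERO/SYSCTL_ONE as bounds rejects any write outside 0..1. The same pattern with a hypothetical knob, for reference:

    static int example_flag;                        /* hypothetical */

    static struct ctl_table example_table[] = {
            {
                    .procname       = "example_flag",
                    .data           = &example_flag,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = proc_dointvec_minmax,
                    .extra1         = SYSCTL_ZERO,  /* floor */
                    .extra2         = SYSCTL_ONE,   /* ceiling */
            },
            { }
    };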
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 48aa457a9516..5c57850fab4b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2251,7 +2251,7 @@ void tcp_set_state(struct sock *sk, int state)
if (inet_csk(sk)->icsk_bind_hash &&
!(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
inet_put_port(sk);
- /* fall through */
+ fallthrough;
default:
if (oldstate == TCP_ESTABLISHED)
TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
@@ -3344,6 +3344,7 @@ static size_t tcp_opt_stats_get_size(void)
nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */
0;
}
@@ -3399,6 +3400,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash);
+ nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT,
+ max_t(int, 0, tp->write_seq - tp->snd_nxt));
return stats;
}
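
TCP_NLA_BYTES_NOTSENT exports the gap between what the application has queued (write_seq) and what TCP has actually transmitted (snd_nxt), mirroring tcpi_notsent_bytes; the max_t() clamp guards against a transient negative difference. In isolation:

    /* bytes queued by the app but not yet pushed to the wire */
    u32 notsent = max_t(int, 0, tp->write_seq - tp->snd_nxt);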
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 0d08f9e2d8d0..75a1c985f49a 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -179,15 +179,15 @@ static size_t tcp_diag_get_aux_size(struct sock *sk, bool net_admin)
}
static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
- inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
+ inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r);
}
-static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+static int tcp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
- return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
+ return inet_diag_dump_one_icsk(&tcp_hashinfo, cb, req);
}
#ifdef CONFIG_INET_DIAG_DESTROY
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6b6b57000dad..bf4ced9273e8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2865,7 +2865,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
(*ack_flag & FLAG_LOST_RETRANS)))
return;
/* Change state if cwnd is undone or retransmits are lost */
- /* fall through */
+ fallthrough;
default:
if (tcp_is_reno(tp)) {
if (flag & FLAG_SND_UNA_ADVANCED)
@@ -6367,7 +6367,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
mptcp_incoming_options(sk, skb, &tp->rx_opt);
break;
}
- /* fall through */
+ fallthrough;
case TCP_FIN_WAIT1:
case TCP_FIN_WAIT2:
/* RFC 793 says to queue data in these states,
@@ -6382,7 +6382,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
return 1;
}
}
- /* Fall through */
+ fallthrough;
case TCP_ESTABLISHED:
tcp_data_queue(sk, skb);
queued = 1;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 52acf0bc2ee5..83a5d24e13b8 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2072,7 +2072,7 @@ do_time_wait:
}
}
/* to ACK */
- /* fall through */
+ fallthrough;
case TCP_TW_ACK:
tcp_v4_timewait_ack(sk, skb);
break;
@@ -2368,7 +2368,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
break;
st->bucket = 0;
st->state = TCP_SEQ_STATE_ESTABLISHED;
- /* Fallthrough */
+ fallthrough;
case TCP_SEQ_STATE_ESTABLISHED:
if (st->bucket > tcp_hashinfo.ehash_mask)
break;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index c8274371c3d0..03af7c3e75ef 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -819,6 +819,7 @@ EXPORT_SYMBOL(tcp_check_req);
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb)
+ __releases(&((child)->sk_lock.slock))
{
int ret = 0;
int state = child->sk_state;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 08a41f1e1cd2..2633fc231593 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1671,10 +1671,11 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
error = -EAGAIN;
do {
spin_lock_bh(&queue->lock);
- skb = __skb_try_recv_from_queue(sk, queue, flags,
- udp_skb_destructor,
- off, err, &last);
+ skb = __skb_try_recv_from_queue(sk, queue, flags, off,
+ err, &last);
if (skb) {
+ if (!(flags & MSG_PEEK))
+ udp_skb_destructor(sk, skb);
spin_unlock_bh(&queue->lock);
return skb;
}
@@ -1692,9 +1693,10 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
spin_lock(&sk_queue->lock);
skb_queue_splice_tail_init(sk_queue, queue);
- skb = __skb_try_recv_from_queue(sk, queue, flags,
- udp_skb_dtor_locked,
- off, err, &last);
+ skb = __skb_try_recv_from_queue(sk, queue, flags, off,
+ err, &last);
+ if (skb && !(flags & MSG_PEEK))
+ udp_skb_dtor_locked(sk, skb);
spin_unlock(&sk_queue->lock);
spin_unlock_bh(&queue->lock);
if (skb)
@@ -2561,7 +2563,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
case UDP_ENCAP_ESPINUDP_NON_IKE:
up->encap_rcv = xfrm4_udp_encap_rcv;
#endif
- /* FALLTHROUGH */
+ fallthrough;
case UDP_ENCAP_L2TPINUDP:
up->encap_type = val;
lock_sock(sk);
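
The __skb_recv_udp() change above moves the destructor out of __skb_try_recv_from_queue(): the helper no longer takes a destructor argument, and the caller uncharges receive memory only when the skb is really dequeued, so MSG_PEEK leaves the accounting untouched. The new contract, sketched:

    spin_lock_bh(&queue->lock);
    skb = __skb_try_recv_from_queue(sk, queue, flags, off, err, &last);
    if (skb && !(flags & MSG_PEEK))
            udp_skb_destructor(sk, skb);    /* uncharge under queue->lock */
    spin_unlock_bh(&queue->lock);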
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 910555a4d9fe..1dbece34496e 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -21,16 +21,15 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
if (!inet_diag_bc_sk(bc, sk))
return 0;
- return inet_sk_diag_fill(sk, NULL, skb, req,
- sk_user_ns(NETLINK_CB(cb->skb).sk),
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin);
+ return inet_sk_diag_fill(sk, NULL, skb, cb, req, NLM_F_MULTI,
+ net_admin);
}
-static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+static int udp_dump_one(struct udp_table *tbl,
+ struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
+ struct sk_buff *in_skb = cb->skb;
int err = -EINVAL;
struct sock *sk = NULL;
struct sk_buff *rep;
@@ -64,17 +63,15 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
goto out;
err = -ENOMEM;
- rep = nlmsg_new(sizeof(struct inet_diag_msg) +
- sizeof(struct inet_diag_meminfo) + 64,
+ rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
+ inet_diag_msg_attrs_size() +
+ nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
GFP_KERNEL);
if (!rep)
goto out;
- err = inet_sk_diag_fill(sk, NULL, rep, req,
- sk_user_ns(NETLINK_CB(in_skb).sk),
- NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh,
- netlink_net_capable(in_skb, CAP_NET_ADMIN));
+ err = inet_sk_diag_fill(sk, NULL, rep, cb, req, 0,
+ netlink_net_capable(in_skb, CAP_NET_ADMIN));
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
kfree_skb(rep);
@@ -93,12 +90,16 @@ out_nosk:
static void udp_dump(struct udp_table *table, struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
struct net *net = sock_net(skb->sk);
+ struct inet_diag_dump_data *cb_data;
int num, s_num, slot, s_slot;
+ struct nlattr *bc;
+ cb_data = cb->data;
+ bc = cb_data->inet_diag_nla_bc;
s_slot = cb->args[0];
num = s_num = cb->args[1];
@@ -146,15 +147,15 @@ done:
}
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
- udp_dump(&udp_table, skb, cb, r, bc);
+ udp_dump(&udp_table, skb, cb, r);
}
-static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+static int udp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
- return udp_dump_one(&udp_table, in_skb, nlh, req);
+ return udp_dump_one(&udp_table, cb, req);
}
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
@@ -249,16 +250,15 @@ static const struct inet_diag_handler udp_diag_handler = {
};
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
- udp_dump(&udplite_table, skb, cb, r, bc);
+ udp_dump(&udplite_table, skb, cb, r);
}
-static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+static int udplite_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
- return udp_dump_one(&udplite_table, in_skb, nlh, req);
+ return udp_dump_one(&udplite_table, cb, req);
}
static const struct inet_diag_handler udplite_diag_handler = {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cb493e15959c..5b9de773ce73 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1226,11 +1226,13 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
}
static void
-cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
+cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
+ bool del_rt, bool del_peer)
{
struct fib6_info *f6i;
- f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
+ f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
+ ifp->prefix_len,
ifp->idev->dev, 0, RTF_DEFAULT, true);
if (f6i) {
if (del_rt)
@@ -1293,7 +1295,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
if (action != CLEANUP_PREFIX_RT_NOP) {
cleanup_prefix_route(ifp, expires,
- action == CLEANUP_PREFIX_RT_DEL);
+ action == CLEANUP_PREFIX_RT_DEL, false);
}
/* clean up prefsrc entries */
@@ -3299,7 +3301,7 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
switch (idev->cnf.addr_gen_mode) {
case IN6_ADDR_GEN_MODE_RANDOM:
ipv6_gen_mode_random_init(idev);
- /* fallthrough */
+ fallthrough;
case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
if (!ipv6_generate_stable_address(&addr, 0, idev))
addrconf_add_linklocal(idev, &addr,
@@ -3345,6 +3347,10 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_NONE) &&
(dev->type != ARPHRD_RAWIP)) {
/* Alas, we support only Ethernet autoconfiguration. */
+ idev = __in6_dev_get(dev);
+ if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
+ dev->flags & IFF_MULTICAST)
+ ipv6_mc_up(idev);
return;
}
@@ -3517,9 +3523,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
break;
run_pending = 1;
-
- /* fall through */
-
+ fallthrough;
case NETDEV_UP:
case NETDEV_CHANGE:
if (dev->flags & IFF_SLAVE)
@@ -4586,12 +4590,14 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
}
static int modify_prefix_route(struct inet6_ifaddr *ifp,
- unsigned long expires, u32 flags)
+ unsigned long expires, u32 flags,
+ bool modify_peer)
{
struct fib6_info *f6i;
u32 prio;
- f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
+ f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
+ ifp->prefix_len,
ifp->idev->dev, 0, RTF_DEFAULT, true);
if (!f6i)
return -ENOENT;
@@ -4602,7 +4608,8 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
ip6_del_rt(dev_net(ifp->idev->dev), f6i);
/* add new one */
- addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
+ addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
+ ifp->prefix_len,
ifp->rt_priority, ifp->idev->dev,
expires, flags, GFP_KERNEL);
} else {
@@ -4624,6 +4631,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
unsigned long timeout;
bool was_managetempaddr;
bool had_prefixroute;
+ bool new_peer = false;
ASSERT_RTNL();
@@ -4655,6 +4663,13 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
cfg->preferred_lft = timeout;
}
+ if (cfg->peer_pfx &&
+ memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
+ if (!ipv6_addr_any(&ifp->peer_addr))
+ cleanup_prefix_route(ifp, expires, true, true);
+ new_peer = true;
+ }
+
spin_lock_bh(&ifp->lock);
was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
@@ -4670,6 +4685,9 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
ifp->rt_priority = cfg->rt_priority;
+ if (new_peer)
+ ifp->peer_addr = *cfg->peer_pfx;
+
spin_unlock_bh(&ifp->lock);
if (!(ifp->flags&IFA_F_TENTATIVE))
ipv6_ifa_notify(0, ifp);
@@ -4678,7 +4696,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
int rc = -ENOENT;
if (had_prefixroute)
- rc = modify_prefix_route(ifp, expires, flags);
+ rc = modify_prefix_route(ifp, expires, flags, false);
/* prefix route could have been deleted; if so restore it */
if (rc == -ENOENT) {
@@ -4686,6 +4704,15 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
ifp->rt_priority, ifp->idev->dev,
expires, flags, GFP_KERNEL);
}
+
+ if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
+ rc = modify_prefix_route(ifp, expires, flags, true);
+
+ if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
+ addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
+ ifp->rt_priority, ifp->idev->dev,
+ expires, flags, GFP_KERNEL);
+ }
} else if (had_prefixroute) {
enum cleanup_prefix_rt_t action;
unsigned long rt_expires;
@@ -4696,7 +4723,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
if (action != CLEANUP_PREFIX_RT_NOP) {
cleanup_prefix_route(ifp, rt_expires,
- action == CLEANUP_PREFIX_RT_DEL);
+ action == CLEANUP_PREFIX_RT_DEL, false);
}
}
@@ -5983,9 +6010,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);
if (!ipv6_addr_any(&ifp->peer_addr))
- addrconf_prefix_route(&ifp->peer_addr, 128, 0,
- ifp->idev->dev, 0, 0,
- GFP_ATOMIC);
+ addrconf_prefix_route(&ifp->peer_addr, 128,
+ ifp->rt_priority, ifp->idev->dev,
+ 0, 0, GFP_ATOMIC);
break;
case RTM_DELADDR:
if (ifp->idev->cnf.forwarding)
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 95835e8d99aa..45e2adc56610 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -36,7 +36,7 @@ struct tmp_ext {
struct in6_addr saddr;
#endif
struct in6_addr daddr;
- char hdrs[0];
+ char hdrs[];
};
struct ah_skb_cb {
@@ -259,7 +259,7 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
case NEXTHDR_DEST:
if (dir == XFRM_POLICY_OUT)
ipv6_rearrange_destopt(iph, exthdr.opth);
- /* fall through */
+ fallthrough;
case NEXTHDR_HOP:
if (!zero_out_mutable_opts(exthdr.opth)) {
net_dbg_ratelimited("overrun %sopts\n",
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index ab5add0fe6b4..bcb9f5e62808 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -97,7 +97,7 @@ static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
*/
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
break;
- /* fall through */
+ fallthrough;
case 2: /* send ICMP PARM PROB regardless and drop packet */
icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
return false;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ef408a5090a2..2688f3e82165 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -898,7 +898,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
hdr = icmp6_hdr(skb);
/* to notify */
- /* fall through */
+ fallthrough;
case ICMPV6_DEST_UNREACH:
case ICMPV6_TIME_EXCEED:
case ICMPV6_PARAMPROB:
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 72abf892302f..46ed56719476 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -2068,8 +2068,8 @@ static int fib6_walk_continue(struct fib6_walker *w)
continue;
}
w->state = FWS_L;
+ fallthrough;
#endif
- /* fall through */
case FWS_L:
left = rcu_dereference_protected(fn->left, 1);
if (left) {
@@ -2078,7 +2078,7 @@ static int fib6_walk_continue(struct fib6_walker *w)
continue;
}
w->state = FWS_R;
- /* fall through */
+ fallthrough;
case FWS_R:
right = rcu_dereference_protected(fn->right, 1);
if (right) {
@@ -2088,7 +2088,7 @@ static int fib6_walk_continue(struct fib6_walker *w)
}
w->state = FWS_C;
w->leaf = rcu_dereference_protected(fn->leaf, 1);
- /* fall through */
+ fallthrough;
case FWS_C:
if (w->leaf && fn->fn_flags & RTN_RTINFO) {
int err;
@@ -2107,7 +2107,7 @@ static int fib6_walk_continue(struct fib6_walker *w)
}
skip:
w->state = FWS_U;
- /* fall through */
+ fallthrough;
case FWS_U:
if (fn == w->root)
return 0;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index d6483926f449..65a54d74acc1 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1691,7 +1691,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
case MRT6_ADD_MFC:
case MRT6_DEL_MFC:
parent = -1;
- /* fall through */
+ fallthrough;
case MRT6_ADD_MFC_PROXY:
case MRT6_DEL_MFC_PROXY:
if (optlen < sizeof(mfc))
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 53caf59c591e..4a3feccd5b10 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1782,7 +1782,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
case NETDEV_CHANGEADDR:
neigh_changeaddr(&nd_tbl, dev);
fib6_run_gc(0, net, false);
- /* fallthrough */
+ fallthrough;
case NETDEV_UP:
idev = in6_dev_get(dev);
if (!idev)
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
index 22b80db6d882..da64550a5707 100644
--- a/net/ipv6/netfilter/nf_log_ipv6.c
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -248,7 +248,7 @@ static void dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
/* Max length: 17 "POINTER=ffffffff " */
nf_log_buf_add(m, "POINTER=%08x ",
ntohl(ic->icmp6_pointer));
- /* Fall through */
+ fallthrough;
case ICMPV6_DEST_UNREACH:
case ICMPV6_PKT_TOOBIG:
case ICMPV6_TIME_EXCEED:
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index dfe5e603ffe1..0028aa1d7869 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1076,7 +1076,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
break;
- /* fall through */
+ fallthrough;
default:
return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
@@ -1099,7 +1099,7 @@ static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
break;
- /* fall through */
+ fallthrough;
default:
return compat_ipv6_setsockopt(sk, level, optname,
optval, optlen);
@@ -1161,7 +1161,7 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname,
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
break;
- /* fall through */
+ fallthrough;
default:
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
@@ -1184,7 +1184,7 @@ static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
break;
- /* fall through */
+ fallthrough;
default:
return compat_ipv6_getsockopt(sk, level, optname,
optval, optlen);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2931224b674e..2430c2f6819a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4370,7 +4370,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case IPSTATS_MIB_OUTNOROUTES:
IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
break;
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index ab7f124ff5d7..ac837afb9040 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -29,7 +29,7 @@
struct seg6_lwt {
struct dst_cache cache;
- struct seg6_iptunnel_encap tuninfo[0];
+ struct seg6_iptunnel_encap tuninfo[];
};
static inline struct seg6_lwt *seg6_lwt_lwtunnel(struct lwtunnel_state *lwt)
@@ -268,7 +268,7 @@ static int seg6_do_srh(struct sk_buff *skb)
skb_mac_header_rebuild(skb);
skb_push(skb, skb->mac_len);
- err = seg6_do_srh_encap(skb, tinfo->srh, NEXTHDR_NONE);
+ err = seg6_do_srh_encap(skb, tinfo->srh, IPPROTO_ETHERNET);
if (err)
return err;
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 7cbc19731997..8165802d8e05 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -282,7 +282,7 @@ static int input_action_end_dx2(struct sk_buff *skb,
struct net_device *odev;
struct ethhdr *eth;
- if (!decap_and_validate(skb, NEXTHDR_NONE))
+ if (!decap_and_validate(skb, IPPROTO_ETHERNET))
goto drop;
if (!pskb_may_pull(skb, ETH_HLEN))
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index ec8fcfc60a27..63b657aa8d29 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -203,29 +203,16 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
struct ctl_table *ipv6_table;
struct ctl_table *ipv6_route_table;
struct ctl_table *ipv6_icmp_table;
- int err;
+ int err, i;
err = -ENOMEM;
ipv6_table = kmemdup(ipv6_table_template, sizeof(ipv6_table_template),
GFP_KERNEL);
if (!ipv6_table)
goto out;
- ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
- ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
- ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
- ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
- ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;
- ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
- ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
- ipv6_table[7].data = &net->ipv6.sysctl.flowlabel_state_ranges;
- ipv6_table[8].data = &net->ipv6.sysctl.ip_nonlocal_bind;
- ipv6_table[9].data = &net->ipv6.sysctl.flowlabel_reflect;
- ipv6_table[10].data = &net->ipv6.sysctl.max_dst_opts_cnt;
- ipv6_table[11].data = &net->ipv6.sysctl.max_hbh_opts_cnt;
- ipv6_table[12].data = &net->ipv6.sysctl.max_dst_opts_len;
- ipv6_table[13].data = &net->ipv6.sysctl.max_hbh_opts_len;
- ipv6_table[14].data = &net->ipv6.sysctl.multipath_hash_policy,
- ipv6_table[15].data = &net->ipv6.sysctl.seg6_flowlabel;
+ /* Update the variables to point into the current struct net */
+ for (i = 0; i < ARRAY_SIZE(ipv6_table_template) - 1; i++)
+ ipv6_table[i].data += (void *)net - (void *)&init_net;
ipv6_route_table = ipv6_route_sysctl_init(net);
if (!ipv6_route_table)
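
The loop above replaces sixteen fragile index assignments with pointer arithmetic: every template .data field points at a member of init_net, and since all struct net instances share one layout, adding the byte distance between the new netns and init_net retargets each pointer at the same member of the current netns. Spelled out:

    /* template: ipv6_table[i].data == &init_net.ipv6.sysctl.some_field
     * after:    ipv6_table[i].data == &net->ipv6.sysctl.some_field
     */
    ipv6_table[i].data += (void *)net - (void *)&init_net;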
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index eaf09e6b7844..413b3425ac66 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1742,7 +1742,7 @@ do_time_wait:
}
}
/* to ACK */
- /* fall through */
+ fallthrough;
case TCP_TW_ACK:
tcp_v6_timewait_ack(sk, skb);
break;
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index ea9e73428ed9..56fac24a627a 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -380,9 +380,7 @@ static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
struct bpf_prog *prog = psock->bpf_prog;
int res;
- preempt_disable();
- res = BPF_PROG_RUN(prog, skb);
- preempt_enable();
+ res = bpf_prog_run_pin_on_cpu(prog, skb);
return res;
}
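
The kcm change is part of a tree-wide conversion: bpf_prog_run_pin_on_cpu() keeps the program on one CPU for the duration of the run (disabling migration rather than preemption on PREEMPT_RT), which is what per-CPU maps actually require, instead of each caller open-coding preempt_disable(). Before and after, roughly:

    /* old: correctness tied to preemption being off */
    preempt_disable();
    res = BPF_PROG_RUN(prog, skb);
    preempt_enable();

    /* new: helper guarantees single-CPU execution */
    res = bpf_prog_run_pin_on_cpu(prog, skb);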
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 2db3d50d10a4..10cf7c3dcbb3 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -116,7 +116,7 @@ struct l2tp_session {
void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len);
void (*session_close)(struct l2tp_session *session);
void (*show)(struct seq_file *m, void *priv);
- uint8_t priv[0]; /* private data */
+ u8 priv[]; /* private data */
};
/* Describes the tunnel. It contains info to track all the associated
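
priv[0] becoming priv[] here (and hdrs[], tuninfo[], nh_label[], rt_nh[] elsewhere in this series) converts the old GNU zero-length-array idiom to a C99 flexible array member. sizeof() on the struct still excludes the trailing array, so allocation sites are unchanged, but the compiler can now reject misuse such as taking sizeof the member or placing it mid-struct. The idiom, with a hypothetical struct:

    struct example_blob {
            size_t  len;
            u8      data[];         /* must be the last member */
    };

    struct example_blob *b = kmalloc(sizeof(*b) + n, GFP_KERNEL);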
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index d69983370381..38a0383dfbcf 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -1152,7 +1152,8 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
}
}
- if (!(mpath->flags & MESH_PATH_RESOLVING))
+ if (!(mpath->flags & MESH_PATH_RESOLVING) &&
+ mesh_path_sel_is_hwmp(sdata))
mesh_queue_preq(mpath, PREQ_Q_F_START);
if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 768a302879b4..0e9aa94adc07 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -98,7 +98,7 @@ struct mpls_nh { /* next hop label forwarding entry */
u8 nh_via_table;
u8 nh_reserved1;
- u32 nh_label[0];
+ u32 nh_label[];
};
/* offset of via from beginning of mpls_nh */
@@ -154,7 +154,7 @@ struct mpls_route { /* next hop label forwarding entry */
u8 rt_nh_size;
u8 rt_via_offset;
u8 rt_reserved1;
- struct mpls_nh rt_nh[0];
+ struct mpls_nh rt_nh[];
};
#define for_nexthops(rt) { \
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 45acd877bef3..9c71f427e6e3 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -304,21 +304,22 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
struct mptcp_ext *ext)
{
- ext->data_fin = 1;
-
if (!ext->use_map) {
/* RFC6824 requires a DSS mapping with specific values
* if DATA_FIN is set but no data payload is mapped
*/
+ ext->data_fin = 1;
ext->use_map = 1;
ext->dsn64 = 1;
- ext->data_seq = mptcp_sk(subflow->conn)->write_seq;
+ ext->data_seq = subflow->data_fin_tx_seq;
ext->subflow_seq = 0;
ext->data_len = 1;
- } else {
- /* If there's an existing DSS mapping, DATA_FIN consumes
- * 1 additional byte of mapping space.
+ } else if (ext->data_seq + ext->data_len == subflow->data_fin_tx_seq) {
+ /* If there's an existing DSS mapping and it is the
+ * final mapping, DATA_FIN consumes 1 additional byte of
+ * mapping space.
*/
+ ext->data_fin = 1;
ext->data_len++;
}
}
@@ -334,6 +335,8 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
struct mptcp_sock *msk;
unsigned int ack_size;
bool ret = false;
+ bool can_ack;
+ u64 ack_seq;
u8 tcp_fin;
if (skb) {
@@ -354,15 +357,27 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
if (mpext)
opts->ext_copy = *mpext;
- if (skb && tcp_fin &&
- subflow->conn->sk_state != TCP_ESTABLISHED)
+ if (skb && tcp_fin && subflow->data_fin_tx_enable)
mptcp_write_data_fin(subflow, &opts->ext_copy);
ret = true;
}
+ /* passive sockets' msk will set 'can_ack' after accept(), even
+ * though the first subflow may already have the remote key handy
+ */

+ can_ack = true;
opts->ext_copy.use_ack = 0;
msk = mptcp_sk(subflow->conn);
- if (!msk || !READ_ONCE(msk->can_ack)) {
+ if (likely(msk && READ_ONCE(msk->can_ack))) {
+ ack_seq = msk->ack_seq;
+ } else if (subflow->can_ack) {
+ mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
+ ack_seq++;
+ } else {
+ can_ack = false;
+ }
+
+ if (unlikely(!can_ack)) {
*size = ALIGN(dss_size, 4);
return ret;
}
@@ -375,7 +390,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
dss_size += ack_size;
- opts->ext_copy.data_ack = msk->ack_seq;
+ opts->ext_copy.data_ack = ack_seq;
opts->ext_copy.ack64 = 1;
opts->ext_copy.use_ack = 1;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index a8445407d25a..c0cef07f4382 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -141,11 +141,13 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
bool more_data_avail;
struct tcp_sock *tp;
bool done = false;
- int rcvbuf;
- rcvbuf = max(ssk->sk_rcvbuf, sk->sk_rcvbuf);
- if (rcvbuf > sk->sk_rcvbuf)
- sk->sk_rcvbuf = rcvbuf;
+ if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+ int rcvbuf = max(ssk->sk_rcvbuf, sk->sk_rcvbuf);
+
+ if (rcvbuf > sk->sk_rcvbuf)
+ sk->sk_rcvbuf = rcvbuf;
+ }
tp = tcp_sk(ssk);
do {
@@ -419,6 +421,15 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
return -EOPNOTSUPP;
lock_sock(sk);
+
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+
+ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
+ ret = sk_stream_wait_connect(sk, &timeo);
+ if (ret)
+ goto out;
+ }
+
ssock = __mptcp_tcp_fallback(msk);
if (unlikely(ssock)) {
fallback:
@@ -427,8 +438,6 @@ fallback:
return ret >= 0 ? ret + copied : (copied ? copied : ret);
}
- timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
-
ssk = mptcp_subflow_get(msk);
if (!ssk) {
release_sock(sk);
@@ -460,6 +469,7 @@ fallback:
ssk_check_wmem(msk, ssk);
release_sock(ssk);
+out:
release_sock(sk);
return ret;
}
@@ -712,7 +722,8 @@ static void mptcp_cancel_work(struct sock *sk)
sock_put(sk);
}
-static void mptcp_subflow_shutdown(struct sock *ssk, int how)
+static void mptcp_subflow_shutdown(struct sock *ssk, int how,
+ bool data_fin_tx_enable, u64 data_fin_tx_seq)
{
lock_sock(ssk);
@@ -725,6 +736,14 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how)
tcp_disconnect(ssk, O_NONBLOCK);
break;
default:
+ if (data_fin_tx_enable) {
+ struct mptcp_subflow_context *subflow;
+
+ subflow = mptcp_subflow_ctx(ssk);
+ subflow->data_fin_tx_seq = data_fin_tx_seq;
+ subflow->data_fin_tx_enable = 1;
+ }
+
ssk->sk_shutdown |= how;
tcp_shutdown(ssk, how);
break;
@@ -741,6 +760,7 @@ static void mptcp_close(struct sock *sk, long timeout)
struct mptcp_subflow_context *subflow, *tmp;
struct mptcp_sock *msk = mptcp_sk(sk);
LIST_HEAD(conn_list);
+ u64 data_fin_tx_seq;
lock_sock(sk);
@@ -749,11 +769,15 @@ static void mptcp_close(struct sock *sk, long timeout)
list_splice_init(&msk->conn_list, &conn_list);
+ data_fin_tx_seq = msk->write_seq;
+
release_sock(sk);
list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ subflow->data_fin_tx_seq = data_fin_tx_seq;
+ subflow->data_fin_tx_enable = 1;
__mptcp_close_ssk(sk, ssk, subflow, timeout);
}
@@ -846,7 +870,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
*err = -ENOBUFS;
local_bh_enable();
release_sock(sk);
- mptcp_subflow_shutdown(newsk, SHUT_RDWR + 1);
+ mptcp_subflow_shutdown(newsk, SHUT_RDWR + 1, 0, 0);
tcp_close(newsk, 0);
return NULL;
}
@@ -1025,6 +1049,10 @@ void mptcp_finish_connect(struct sock *ssk)
WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
WRITE_ONCE(msk->ack_seq, ack_seq);
WRITE_ONCE(msk->can_ack, 1);
+ if (inet_sk_state_load(sk) != TCP_ESTABLISHED) {
+ inet_sk_state_store(sk, TCP_ESTABLISHED);
+ sk->sk_state_change(sk);
+ }
}
static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
@@ -1301,7 +1329,7 @@ static int mptcp_shutdown(struct socket *sock, int how)
mptcp_for_each_subflow(msk, subflow) {
struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
- mptcp_subflow_shutdown(tcp_sk, how);
+ mptcp_subflow_shutdown(tcp_sk, how, 1, msk->write_seq);
}
out_unlock:
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 6c0b2c8ab674..313558fa8185 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -125,7 +125,9 @@ struct mptcp_subflow_context {
mpc_map : 1,
data_avail : 1,
rx_eof : 1,
+ data_fin_tx_enable : 1,
can_ack : 1; /* only after processing the remote a key */
+ u64 data_fin_tx_seq;
struct sock *tcp_sock; /* tcp sk backpointer */
struct sock *conn; /* parent mptcp_sock */
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 410809c669e1..4912069627b6 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -411,7 +411,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
*pos = cpu + 1;
return per_cpu_ptr(net->ct.stat, cpu);
}
-
+ (*pos)++;
return NULL;
}
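
The (*pos)++ additions here, in nf_synproxy_core.c, x_tables.c and xt_recent.c below all fix the same seq_file contract violation: ->next() must advance *pos even on the call that returns NULL (or before the early-return path), otherwise a read() resuming after a short buffer restarts the iterator at a stale position and the last record is emitted twice. Shape of a conforming iterator, with a hypothetical lookup helper:

    static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
            (*pos)++;                       /* advance unconditionally */
            return example_lookup(seq, *pos);       /* NULL ends the walk */
    }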
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 8af28e10b4e6..9a477bd563b7 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -252,6 +252,19 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
}
EXPORT_SYMBOL_GPL(flow_offload_add);
+void flow_offload_refresh(struct nf_flowtable *flow_table,
+ struct flow_offload *flow)
+{
+ flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
+
+ if (likely(!nf_flowtable_hw_offload(flow_table) ||
+ !test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags)))
+ return;
+
+ nf_flow_offload_add(flow_table, flow);
+}
+EXPORT_SYMBOL_GPL(flow_offload_refresh);
+
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
return nf_flow_timeout_delta(flow->timeout) <= 0;
@@ -372,6 +385,50 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
+int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv)
+{
+ struct flow_block *block = &flow_table->flow_block;
+ struct flow_block_cb *block_cb;
+ int err = 0;
+
+ mutex_lock(&flow_table->flow_block_lock);
+ block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+ if (block_cb) {
+ err = -EEXIST;
+ goto unlock;
+ }
+
+ block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
+ if (IS_ERR(block_cb)) {
+ err = PTR_ERR(block_cb);
+ goto unlock;
+ }
+
+ list_add_tail(&block_cb->list, &block->cb_list);
+
+unlock:
+ mutex_unlock(&flow_table->flow_block_lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
+
+void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv)
+{
+ struct flow_block *block = &flow_table->flow_block;
+ struct flow_block_cb *block_cb;
+
+ mutex_lock(&flow_table->flow_block_lock);
+ block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+ if (block_cb)
+ list_del(&block_cb->list);
+ else
+ WARN_ON(true);
+ mutex_unlock(&flow_table->flow_block_lock);
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
+
static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
__be16 port, __be16 new_port)
{
@@ -494,6 +551,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
flow_block_init(&flowtable->flow_block);
+ mutex_init(&flowtable->flow_block_lock);
err = rhashtable_init(&flowtable->rhashtable,
&nf_flow_offload_rhash_params);
@@ -550,11 +608,13 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
mutex_lock(&flowtable_lock);
list_del(&flow_table->list);
mutex_unlock(&flowtable_lock);
+
cancel_delayed_work_sync(&flow_table->gc_work);
nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
nf_flow_table_offload_flush(flow_table);
rhashtable_destroy(&flow_table->rhashtable);
+ mutex_destroy(&flow_table->flow_block_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
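
flow_offload_refresh() folds the timeout bump and the hardware re-offload test into one helper shared by both IP hooks (see nf_flow_table_ip.c below), and the new flow_block_lock mutex lets drivers attach and detach offload callbacks while the table walks its cb_list. Driver-side usage, sketched with hypothetical my_* names:

    static int my_bind(struct nf_flowtable *ft, void *priv)
    {
            /* safe against concurrent walkers of ft->flow_block.cb_list */
            return nf_flow_table_offload_add_cb(ft, my_setup_cb, priv);
    }

    static void my_unbind(struct nf_flowtable *ft, void *priv)
    {
            nf_flow_table_offload_del_cb(ft, my_setup_cb, priv);
    }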
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 9e563fd3da0f..5272721080f8 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -232,13 +232,6 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
return NF_STOLEN;
}
-static bool nf_flow_offload_refresh(struct nf_flowtable *flow_table,
- struct flow_offload *flow)
-{
- return nf_flowtable_hw_offload(flow_table) &&
- test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags);
-}
-
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -279,8 +272,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
return NF_ACCEPT;
- if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
- nf_flow_offload_add(flow_table, flow);
+ flow_offload_refresh(flow_table, flow);
if (nf_flow_offload_dst_check(&rt->dst)) {
flow_offload_teardown(flow);
@@ -290,7 +282,6 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
return NF_DROP;
- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
iph = ip_hdr(skb);
ip_decrease_ttl(iph);
skb->tstamp = 0;
@@ -508,8 +499,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
sizeof(*ip6h)))
return NF_ACCEPT;
- if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
- nf_flow_offload_add(flow_table, flow);
+ flow_offload_refresh(flow_table, flow);
if (nf_flow_offload_dst_check(&rt->dst)) {
flow_offload_teardown(flow);
@@ -522,7 +512,6 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
return NF_DROP;
- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
ip6h = ipv6_hdr(skb);
ip6h->hop_limit--;
skb->tstamp = 0;
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 06f00cdc3891..42b73a084a63 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -23,29 +23,6 @@ struct flow_offload_work {
struct flow_offload *flow;
};
-struct nf_flow_key {
- struct flow_dissector_key_meta meta;
- struct flow_dissector_key_control control;
- struct flow_dissector_key_basic basic;
- union {
- struct flow_dissector_key_ipv4_addrs ipv4;
- struct flow_dissector_key_ipv6_addrs ipv6;
- };
- struct flow_dissector_key_tcp tcp;
- struct flow_dissector_key_ports tp;
-} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
-
-struct nf_flow_match {
- struct flow_dissector dissector;
- struct nf_flow_key key;
- struct nf_flow_key mask;
-};
-
-struct nf_flow_rule {
- struct nf_flow_match match;
- struct flow_rule *rule;
-};
-
#define NF_FLOW_DISSECTOR(__match, __type, __field) \
(__match)->dissector.offset[__type] = \
offsetof(struct nf_flow_key, __field)
@@ -610,6 +587,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
if (cmd == FLOW_CLS_REPLACE)
cls_flow.rule = flow_rule->rule;
+ mutex_lock(&flowtable->flow_block_lock);
list_for_each_entry(block_cb, block_cb_list, list) {
err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
block_cb->cb_priv);
@@ -618,6 +596,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
i++;
}
+ mutex_unlock(&flowtable->flow_block_lock);
return i;
}
@@ -692,8 +671,10 @@ static void flow_offload_tuple_stats(struct flow_offload_work *offload,
FLOW_CLS_STATS,
&offload->flow->tuplehash[dir].tuple, &extack);
+ mutex_lock(&flowtable->flow_block_lock);
list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
+ mutex_unlock(&flowtable->flow_block_lock);
memcpy(stats, &cls_flow.stats, sizeof(*stats));
}
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index b0930d4aba22..b9cbe1e2453e 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -267,7 +267,7 @@ static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
*pos = cpu + 1;
return per_cpu_ptr(snet->stats, cpu);
}
-
+ (*pos)++;
return NULL;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index d1318bdf49ca..38c680f28f15 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1405,6 +1405,11 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
lockdep_commit_lock_is_held(net));
if (nft_dump_stats(skb, stats))
goto nla_put_failure;
+
+ if ((chain->flags & NFT_CHAIN_HW_OFFLOAD) &&
+ nla_put_be32(skb, NFTA_CHAIN_FLAGS,
+ htonl(NFT_CHAIN_HW_OFFLOAD)))
+ goto nla_put_failure;
}
if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
@@ -6300,8 +6305,13 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
goto err4;
err = nft_register_flowtable_net_hooks(ctx.net, table, flowtable);
- if (err < 0)
+ if (err < 0) {
+ list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+ list_del_rcu(&hook->list);
+ kfree_rcu(hook, rcu);
+ }
goto err4;
+ }
err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
if (err < 0)
@@ -7378,13 +7388,8 @@ static void nf_tables_module_autoload(struct net *net)
list_splice_init(&net->nft.module_list, &module_list);
mutex_unlock(&net->nft.commit_mutex);
list_for_each_entry_safe(req, next, &module_list, list) {
- if (req->done) {
- list_del(&req->list);
- kfree(req);
- } else {
- request_module("%s", req->module);
- req->done = true;
- }
+ request_module("%s", req->module);
+ req->done = true;
}
mutex_lock(&net->nft.commit_mutex);
list_splice(&module_list, &net->nft.module_list);
@@ -8167,6 +8172,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
__nft_release_tables(net);
mutex_unlock(&net->nft.commit_mutex);
WARN_ON_ONCE(!list_empty(&net->nft.tables));
+ WARN_ON_ONCE(!list_empty(&net->nft.module_list));
}
static struct pernet_operations nf_tables_net_ops = {
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index de3a9596b7f1..a5f294aa8e4c 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -742,6 +742,8 @@ static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
[NFCTH_NAME] = { .type = NLA_NUL_STRING,
.len = NF_CT_HELPER_NAME_LEN-1 },
[NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
+ [NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, },
+ [NFCTH_STATUS] = { .type = NLA_U32, },
};
static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c
index ff9ac8ae0031..eac4a901233f 100644
--- a/net/netfilter/nft_chain_nat.c
+++ b/net/netfilter/nft_chain_nat.c
@@ -89,6 +89,7 @@ static const struct nft_chain_type nft_chain_nat_inet = {
.name = "nat",
.type = NFT_CHAIN_T_NAT,
.family = NFPROTO_INET,
+ .owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 1993af3a2979..a7de3a58f553 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -129,6 +129,7 @@ static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
[NFTA_PAYLOAD_LEN] = { .type = NLA_U32 },
[NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 },
[NFTA_PAYLOAD_CSUM_OFFSET] = { .type = NLA_U32 },
+ [NFTA_PAYLOAD_CSUM_FLAGS] = { .type = NLA_U32 },
};
static int nft_payload_init(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index 4c3f2e24c7cb..764e88682a81 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -339,6 +339,8 @@ static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] =
[NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
[NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, },
[NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, },
+ [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
+ [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
[NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, },
};
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index e27c6c5ba9df..cd2b034eef59 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1551,6 +1551,9 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
struct nf_mttg_trav *trav = seq->private;
+ if (ppos != NULL)
+ ++(*ppos);
+
switch (trav->class) {
case MTTG_TRAV_INIT:
trav->class = MTTG_TRAV_NFP_UNSPEC;
@@ -1576,9 +1579,6 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
default:
return NULL;
}
-
- if (ppos != NULL)
- ++*ppos;
return trav;
}
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 0a9708004e20..225a7ab6d79a 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -492,12 +492,12 @@ static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
const struct recent_entry *e = v;
const struct list_head *head = e->list.next;
+ (*pos)++;
while (head == &t->iphash[st->bucket]) {
if (++st->bucket >= ip_list_hash_size)
return NULL;
head = t->iphash[st->bucket].next;
}
- (*pos)++;
return list_entry(head, struct recent_entry, list);
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 813bfab13296..ed77c75bf63f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2434,7 +2434,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
in_skb->len))
WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
(u8 *)extack->bad_attr -
- in_skb->data));
+ (u8 *)nlh));
} else {
if (extack->cookie_len)
WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
@@ -2583,6 +2583,7 @@ static void *__netlink_seq_next(struct seq_file *seq)
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
+ __acquires(RCU)
{
struct nl_seq_iter *iter = seq->private;
void *obj = SEQ_START_TOKEN;
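
The netlink_ack() fix above changes the base of NLMSGERR_ATTR_OFFS from the start of the request skb to the start of the offending nlmsghdr; with batched requests the two differ, and userspace resolves the offset against the echoed message anyway. Consumer-side arithmetic, assuming the fixed semantics:

    /* offs comes from NLMSGERR_ATTR_OFFS in the extended-ack reply */
    struct nlattr *bad = (struct nlattr *)((unsigned char *)nlh + offs);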
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 6f1b096e601c..43811b5219b5 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -181,13 +181,20 @@ exit:
void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
struct sk_buff *skb)
{
- u8 gate = hdev->pipes[pipe].gate;
u8 status = NFC_HCI_ANY_OK;
struct hci_create_pipe_resp *create_info;
struct hci_delete_pipe_noti *delete_info;
struct hci_all_pipe_cleared_noti *cleared_info;
+ u8 gate;
- pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
+ pr_debug("from pipe %x cmd %x\n", pipe, cmd);
+
+ if (pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
+ gate = hdev->pipes[pipe].gate;
switch (cmd) {
case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
@@ -375,8 +382,14 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
struct sk_buff *skb)
{
int r = 0;
- u8 gate = hdev->pipes[pipe].gate;
+ u8 gate;
+
+ if (pipe >= NFC_HCI_MAX_PIPES) {
+ pr_err("Discarded event %x to invalid pipe %x\n", event, pipe);
+ goto exit;
+ }
+ gate = hdev->pipes[pipe].gate;
if (gate == NFC_HCI_INVALID_GATE) {
pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
goto exit;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index eee0dddb7749..e894254c17d4 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -32,6 +32,7 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
[NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
.len = NFC_DEVICE_NAME_MAXSIZE },
[NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
+ [NFC_ATTR_TARGET_INDEX] = { .type = NLA_U32 },
[NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
[NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
[NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
@@ -43,7 +44,10 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
[NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
[NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
.len = NFC_FIRMWARE_NAME_MAXSIZE },
+ [NFC_ATTR_SE_INDEX] = { .type = NLA_U32 },
[NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
+ [NFC_ATTR_VENDOR_ID] = { .type = NLA_U32 },
+ [NFC_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
[NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
};
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index c047afd12116..07a7dd185995 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -645,6 +645,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
+ [OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
};
static const struct genl_ops dp_packet_genl_ops[] = {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 30c6879d6774..e5b0986215d2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2274,6 +2274,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
TP_STATUS_KERNEL, (macoff+snaplen));
if (!h.raw)
goto drop_n_account;
+
+ if (do_vnet &&
+ virtio_net_hdr_from_skb(skb, h.raw + macoff -
+ sizeof(struct virtio_net_hdr),
+ vio_le(), true, 0))
+ goto drop_n_account;
+
if (po->tp_version <= TPACKET_V2) {
packet_increment_rx_head(po, &po->rx_ring);
/*
@@ -2286,12 +2293,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
status |= TP_STATUS_LOSING;
}
- if (do_vnet &&
- virtio_net_hdr_from_skb(skb, h.raw + macoff -
- sizeof(struct virtio_net_hdr),
- vio_le(), true, 0))
- goto drop_n_account;
-
po->stats.stats1.tp_packets++;
if (copy_skb) {
status |= TP_STATUS_COPY;
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index 413228c4520e..e7d0fe3f4330 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -286,9 +286,38 @@ static int server_del(struct qrtr_node *node, unsigned int port)
return 0;
}
+static int say_hello(struct sockaddr_qrtr *dest)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct msghdr msg = { };
+ struct kvec iv;
+ int ret;
+
+ iv.iov_base = &pkt;
+ iv.iov_len = sizeof(pkt);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_HELLO);
+
+ msg.msg_name = (struct sockaddr *)dest;
+ msg.msg_namelen = sizeof(*dest);
+
+ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("failed to send hello msg\n");
+
+ return ret;
+}
+
/* Announce the list of servers registered on the local node */
static int ctrl_cmd_hello(struct sockaddr_qrtr *sq)
{
+ int ret;
+
+ ret = say_hello(sq);
+ if (ret < 0)
+ return ret;
+
return announce_servers(sq);
}
@@ -566,29 +595,6 @@ static void ctrl_cmd_del_lookup(struct sockaddr_qrtr *from,
}
}
-static int say_hello(void)
-{
- struct qrtr_ctrl_pkt pkt;
- struct msghdr msg = { };
- struct kvec iv;
- int ret;
-
- iv.iov_base = &pkt;
- iv.iov_len = sizeof(pkt);
-
- memset(&pkt, 0, sizeof(pkt));
- pkt.cmd = cpu_to_le32(QRTR_TYPE_HELLO);
-
- msg.msg_name = (struct sockaddr *)&qrtr_ns.bcast_sq;
- msg.msg_namelen = sizeof(qrtr_ns.bcast_sq);
-
- ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
- if (ret < 0)
- pr_err("failed to send hello msg\n");
-
- return ret;
-}
-
static void qrtr_ns_worker(struct work_struct *work)
{
const struct qrtr_ctrl_pkt *pkt;
@@ -687,7 +693,7 @@ static void qrtr_ns_data_ready(struct sock *sk)
queue_work(qrtr_ns.workqueue, &qrtr_ns.work);
}
-void qrtr_ns_init(struct work_struct *work)
+void qrtr_ns_init(void)
{
struct sockaddr_qrtr sq;
int ret;
@@ -725,7 +731,7 @@ void qrtr_ns_init(struct work_struct *work)
if (!qrtr_ns.workqueue)
goto err_sock;
- ret = say_hello();
+ ret = say_hello(&qrtr_ns.bcast_sq);
if (ret < 0)
goto err_wq;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 423310896285..e22092e4a783 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -9,7 +9,6 @@
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/spinlock.h>
#include <linux/wait.h>
-#include <linux/workqueue.h>
#include <net/sock.h>
@@ -110,8 +109,6 @@ static DEFINE_MUTEX(qrtr_node_lock);
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);
-static struct delayed_work qrtr_ns_work;
-
/**
* struct qrtr_node - endpoint node
* @ep_lock: lock for endpoint management and callbacks
@@ -1263,11 +1260,7 @@ static int __init qrtr_proto_init(void)
return rc;
}
- /* FIXME: Currently, this 2s delay is required to catch the NEW_SERVER
- * messages from routers. But the fix could be somewhere else.
- */
- INIT_DELAYED_WORK(&qrtr_ns_work, qrtr_ns_init);
- schedule_delayed_work(&qrtr_ns_work, msecs_to_jiffies(2000));
+ qrtr_ns_init();
return rc;
}
@@ -1275,7 +1268,6 @@ postcore_initcall(qrtr_proto_init);
static void __exit qrtr_proto_fini(void)
{
- cancel_delayed_work_sync(&qrtr_ns_work);
qrtr_ns_remove();
sock_unregister(qrtr_family.family);
proto_unregister(&qrtr_proto);
diff --git a/net/qrtr/qrtr.h b/net/qrtr/qrtr.h
index 53a237a28971..dc2b67f17927 100644
--- a/net/qrtr/qrtr.h
+++ b/net/qrtr/qrtr.h
@@ -29,7 +29,7 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len);
-void qrtr_ns_init(struct work_struct *work);
+void qrtr_ns_init(void);
void qrtr_ns_remove(void);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index edde0e519438..bfbefb7bff9d 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -972,7 +972,7 @@ config NET_ACT_TUNNEL_KEY
config NET_ACT_CT
tristate "connection tracking tc action"
- depends on NET_CLS_ACT && NF_CONNTRACK && NF_NAT
+ depends on NET_CLS_ACT && NF_CONNTRACK && NF_NAT && NF_FLOW_TABLE
help
Say Y here to allow sending packets to the conntrack module.
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 8c466a712cda..aa7b737fed2e 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -185,6 +185,7 @@ static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
return nla_total_size(0) /* action number nested */
+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
+ cookie_len /* TCA_ACT_COOKIE */
+ + nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS_TYPE */
+ nla_total_size(0) /* TCA_ACT_STATS nested */
+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
/* TCA_STATS_BASIC */
@@ -788,6 +789,17 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
}
rcu_read_unlock();
+ if (a->hw_stats_type != TCA_ACT_HW_STATS_TYPE_ANY) {
+ struct nla_bitfield32 hw_stats_type = {
+ a->hw_stats_type,
+ TCA_ACT_HW_STATS_TYPE_ANY,
+ };
+
+ if (nla_put(skb, TCA_ACT_HW_STATS_TYPE, sizeof(hw_stats_type),
+ &hw_stats_type))
+ goto nla_put_failure;
+ }
+
if (a->tcfa_flags) {
struct nla_bitfield32 flags = { a->tcfa_flags,
a->tcfa_flags, };
@@ -854,7 +866,23 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
return c;
}
+static u8 tcf_action_hw_stats_type_get(struct nlattr *hw_stats_type_attr)
+{
+ struct nla_bitfield32 hw_stats_type_bf;
+
+ /* If the user did not pass the attr, they do not care about the
+ * type. Return "any" in that case, which has all supported type
+ * bits set.
+ */
+ if (!hw_stats_type_attr)
+ return TCA_ACT_HW_STATS_TYPE_ANY;
+ hw_stats_type_bf = nla_get_bitfield32(hw_stats_type_attr);
+ return hw_stats_type_bf.value;
+}
+
static const u32 tca_act_flags_allowed = TCA_ACT_FLAGS_NO_PERCPU_STATS;
+static const u32 tca_act_hw_stats_type_allowed = TCA_ACT_HW_STATS_TYPE_ANY;
+
static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
[TCA_ACT_KIND] = { .type = NLA_STRING },
[TCA_ACT_INDEX] = { .type = NLA_U32 },
@@ -863,6 +891,8 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
[TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
[TCA_ACT_FLAGS] = { .type = NLA_BITFIELD32,
.validation_data = &tca_act_flags_allowed },
+ [TCA_ACT_HW_STATS_TYPE] = { .type = NLA_BITFIELD32,
+ .validation_data = &tca_act_hw_stats_type_allowed },
};
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
@@ -871,6 +901,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
bool rtnl_held,
struct netlink_ext_ack *extack)
{
+ u8 hw_stats_type = TCA_ACT_HW_STATS_TYPE_ANY;
struct nla_bitfield32 flags = { 0, 0 };
struct tc_action *a;
struct tc_action_ops *a_o;
@@ -903,6 +934,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
goto err_out;
}
}
+ hw_stats_type =
+ tcf_action_hw_stats_type_get(tb[TCA_ACT_HW_STATS_TYPE]);
if (tb[TCA_ACT_FLAGS])
flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
} else {
@@ -953,6 +986,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
if (!name && tb[TCA_ACT_COOKIE])
tcf_set_action_cookie(&a->act_cookie, cookie);
+ if (!name)
+ a->hw_stats_type = hw_stats_type;
+
/* module count goes up only when brand new policy is created
* if it exists and is only bound to in a_o->init() then
* ACT_P_CREATED is not returned (a zero is).
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index f685c0d73708..56b66d215a89 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -15,6 +15,7 @@
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
@@ -24,6 +25,7 @@
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>
+#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
@@ -31,6 +33,523 @@
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>
+static struct workqueue_struct *act_ct_wq;
+static struct rhashtable zones_ht;
+static DEFINE_MUTEX(zones_mutex);
+
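+/* One flow table is kept per conntrack zone and shared by all ct
+ * actions in that zone: zones_ht maps the zone id to a refcounted
+ * table, and zones_mutex serializes creation and teardown.
+ */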
+struct tcf_ct_flow_table {
+ struct rhash_head node; /* In zones tables */
+
+ struct rcu_work rwork;
+ struct nf_flowtable nf_ft;
+ refcount_t ref;
+ u16 zone;
+
+ bool dying;
+};
+
+static const struct rhashtable_params zones_params = {
+ .head_offset = offsetof(struct tcf_ct_flow_table, node),
+ .key_offset = offsetof(struct tcf_ct_flow_table, zone),
+ .key_len = sizeof_field(struct tcf_ct_flow_table, zone),
+ .automatic_shrinking = true,
+};
+
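+/* Reserve and return the next unused entry of the flow_action array. */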
+static struct flow_action_entry *
+tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
+{
+ int i = flow_action->num_entries++;
+
+ return &flow_action->entries[i];
+}
+
+static void tcf_ct_add_mangle_action(struct flow_action *action,
+ enum flow_action_mangle_base htype,
+ u32 offset,
+ u32 mask,
+ u32 val)
+{
+ struct flow_action_entry *entry;
+
+ entry = tcf_ct_flow_table_flow_action_get_next(action);
+ entry->id = FLOW_ACTION_MANGLE;
+ entry->mangle.htype = htype;
+ entry->mangle.mask = ~mask;
+ entry->mangle.offset = offset;
+ entry->mangle.val = val;
+}
+
+/* The following NAT helpers check whether the inverted reverse tuple
+ * (target) differs from the current direction's tuple - meaning NAT of
+ * ports and/or IP addresses is needed - and add the relevant mangle
+ * actions.
+ */
+static void
+tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple target,
+ struct flow_action *action)
+{
+ if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
+ offsetof(struct iphdr, saddr),
+ 0xFFFFFFFF,
+ be32_to_cpu(target.src.u3.ip));
+ if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
+ offsetof(struct iphdr, daddr),
+ 0xFFFFFFFF,
+ be32_to_cpu(target.dst.u3.ip));
+}
+
+static void
+tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
+ union nf_inet_addr *addr,
+ u32 offset)
+{
+ int i;
+
+ for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
+ i * sizeof(u32) + offset,
+ 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
+}
+
+static void
+tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple target,
+ struct flow_action *action)
+{
+ if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
+ tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
+ offsetof(struct ipv6hdr,
+ saddr));
+ if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
+ tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
+ offsetof(struct ipv6hdr,
+ daddr));
+}
+
+static void
+tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple target,
+ struct flow_action *action)
+{
+ __be16 target_src = target.src.u.tcp.port;
+ __be16 target_dst = target.dst.u.tcp.port;
+
+ if (target_src != tuple->src.u.tcp.port)
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
+ offsetof(struct tcphdr, source),
+ 0xFFFF, be16_to_cpu(target_src));
+ if (target_dst != tuple->dst.u.tcp.port)
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
+ offsetof(struct tcphdr, dest),
+ 0xFFFF, be16_to_cpu(target_dst));
+}
+
+static void
+tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple target,
+ struct flow_action *action)
+{
+ __be16 target_src = target.src.u.udp.port;
+ __be16 target_dst = target.dst.u.udp.port;
+
+ if (target_src != tuple->src.u.udp.port)
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
+ offsetof(struct udphdr, source),
+ 0xFFFF, be16_to_cpu(target_src));
+ if (target_dst != tuple->dst.u.udp.port)
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
+ offsetof(struct udphdr, dest),
+ 0xFFFF, be16_to_cpu(target_dst));
+}
+
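+/* Emit a CT_METADATA entry carrying the conntrack mark, labels and a
+ * cookie that packs the ct pointer together with ctinfo, matching the
+ * encoding expected by tcf_ct_flow_table_restore_skb().
+ */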
+static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ struct flow_action *action)
+{
+ struct nf_conn_labels *ct_labels;
+ struct flow_action_entry *entry;
+ enum ip_conntrack_info ctinfo;
+ u32 *act_ct_labels;
+
+ entry = tcf_ct_flow_table_flow_action_get_next(action);
+ entry->id = FLOW_ACTION_CT_METADATA;
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
+ entry->ct_metadata.mark = ct->mark;
+#endif
+ ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
+ IP_CT_ESTABLISHED_REPLY;
+ /* aligns with the CT reference on the SKB nf_ct_set */
+ entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
+
+ act_ct_labels = entry->ct_metadata.labels;
+ ct_labels = nf_ct_labels_find(ct);
+ if (ct_labels)
+ memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
+ else
+ memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
+}
+
+static int tcf_ct_flow_table_add_action_nat(struct net *net,
+ struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ struct flow_action *action)
+{
+ const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
+ struct nf_conntrack_tuple target;
+
+ nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
+
+ switch (tuple->src.l3num) {
+ case NFPROTO_IPV4:
+ tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
+ action);
+ break;
+ case NFPROTO_IPV6:
+ tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
+ action);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (nf_ct_protonum(ct)) {
+ case IPPROTO_TCP:
+ tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
+ break;
+ case IPPROTO_UDP:
+ tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
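+/* .action callback of the nf flowtable: build the flow_action entries
+ * (NAT mangles plus CT metadata) used to offload @flow in direction
+ * @tdir; on failure, any partially filled entries are cleared again.
+ */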
+static int tcf_ct_flow_table_fill_actions(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir tdir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action *action = &flow_rule->rule->action;
+ int num_entries = action->num_entries;
+ struct nf_conn *ct = flow->ct;
+ enum ip_conntrack_dir dir;
+ int i, err;
+
+ switch (tdir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ dir = IP_CT_DIR_ORIGINAL;
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ dir = IP_CT_DIR_REPLY;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
+ if (err)
+ goto err_nat;
+
+ tcf_ct_flow_table_add_action_meta(ct, dir, action);
+ return 0;
+
+err_nat:
+ /* Clear filled actions */
+ for (i = num_entries; i < action->num_entries; i++)
+ memset(&action->entries[i], 0, sizeof(action->entries[i]));
+ action->num_entries = num_entries;
+
+ return err;
+}
+
+static struct nf_flowtable_type flowtable_ct = {
+ .action = tcf_ct_flow_table_fill_actions,
+ .owner = THIS_MODULE,
+};
+
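+/* Get (or create) the flow table for params->zone. Tables are shared
+ * between actions of the same zone and refcounted; creation inserts the
+ * table into zones_ht and initializes an nf flow table with
+ * NF_FLOWTABLE_HW_OFFLOAD set.
+ */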
+static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
+{
+ struct tcf_ct_flow_table *ct_ft;
+ int err = -ENOMEM;
+
+ mutex_lock(&zones_mutex);
+ ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
+ if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
+ goto out_unlock;
+
+ ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
+ if (!ct_ft)
+ goto err_alloc;
+ refcount_set(&ct_ft->ref, 1);
+
+ ct_ft->zone = params->zone;
+ err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
+ if (err)
+ goto err_insert;
+
+ ct_ft->nf_ft.type = &flowtable_ct;
+ ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
+ err = nf_flow_table_init(&ct_ft->nf_ft);
+ if (err)
+ goto err_init;
+
+ __module_get(THIS_MODULE);
+out_unlock:
+ params->ct_ft = ct_ft;
+ params->nf_ft = &ct_ft->nf_ft;
+ mutex_unlock(&zones_mutex);
+
+ return 0;
+
+err_init:
+ rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
+err_insert:
+ kfree(ct_ft);
+err_alloc:
+ mutex_unlock(&zones_mutex);
+ return err;
+}
+
+static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
+{
+ struct tcf_ct_flow_table *ct_ft;
+
+ ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
+ rwork);
+ nf_flow_table_free(&ct_ft->nf_ft);
+ kfree(ct_ft);
+
+ module_put(THIS_MODULE);
+}
+
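+/* Release a reference to the zone's flow table. The final put removes
+ * it from zones_ht and defers nf_flow_table_free() to act_ct_wq via
+ * queue_rcu_work(), i.e. after an RCU grace period has elapsed.
+ */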
+static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
+{
+ struct tcf_ct_flow_table *ct_ft = params->ct_ft;
+
+ if (refcount_dec_and_test(&params->ct_ft->ref)) {
+ rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
+ INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
+ queue_rcu_work(act_ct_wq, &ct_ft->rwork);
+ }
+}
+
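+/* Add an established connection to the zone's flow table. The
+ * IPS_OFFLOAD_BIT test-and-set guards against double insertion; TCP
+ * window tracking is set to liberal because conntrack will no longer
+ * see every packet of an offloaded flow.
+ */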
+static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
+ struct nf_conn *ct,
+ bool tcp)
+{
+ struct flow_offload *entry;
+ int err;
+
+ if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
+ return;
+
+ entry = flow_offload_alloc(ct);
+ if (!entry) {
+ WARN_ON_ONCE(1);
+ goto err_alloc;
+ }
+
+ if (tcp) {
+ ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ }
+
+ err = flow_offload_add(&ct_ft->nf_ft, entry);
+ if (err)
+ goto err_add;
+
+ return;
+
+err_add:
+ flow_offload_free(entry);
+err_alloc:
+ clear_bit(IPS_OFFLOAD_BIT, &ct->status);
+}
+
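+/* Decide whether a connection is ready for offload: only established
+ * TCP and UDP flows qualify, and flows that need a conntrack helper or
+ * sequence adjustment must stay in the software path.
+ */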
+static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ bool tcp = false;
+
+ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
+ return;
+
+ switch (nf_ct_protonum(ct)) {
+ case IPPROTO_TCP:
+ tcp = true;
+ if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+ return;
+ break;
+ case IPPROTO_UDP:
+ break;
+ default:
+ return;
+ }
+
+ if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
+ ct->status & IPS_SEQ_ADJUST)
+ return;
+
+ tcf_ct_flow_table_add(ct_ft, ct, tcp);
+}
+
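+/* Build a flow_offload_tuple from an IPv4 skb. Fragments, packets with
+ * IP options (thoff != sizeof(struct iphdr)) and packets with ttl <= 1
+ * are rejected and stay on the regular path.
+ */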
+static bool
+tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
+ struct flow_offload_tuple *tuple,
+ struct tcphdr **tcph)
+{
+ struct flow_ports *ports;
+ unsigned int thoff;
+ struct iphdr *iph;
+
+ if (!pskb_network_may_pull(skb, sizeof(*iph)))
+ return false;
+
+ iph = ip_hdr(skb);
+ thoff = iph->ihl * 4;
+
+ if (ip_is_fragment(iph) ||
+ unlikely(thoff != sizeof(struct iphdr)))
+ return false;
+
+ if (iph->protocol != IPPROTO_TCP &&
+ iph->protocol != IPPROTO_UDP)
+ return false;
+
+ if (iph->ttl <= 1)
+ return false;
+
+ if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
+ thoff + sizeof(struct tcphdr) :
+ thoff + sizeof(*ports)))
+ return false;
+
+ iph = ip_hdr(skb);
+ if (iph->protocol == IPPROTO_TCP)
+ *tcph = (void *)(skb_network_header(skb) + thoff);
+
+ ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+ tuple->src_v4.s_addr = iph->saddr;
+ tuple->dst_v4.s_addr = iph->daddr;
+ tuple->src_port = ports->source;
+ tuple->dst_port = ports->dest;
+ tuple->l3proto = AF_INET;
+ tuple->l4proto = iph->protocol;
+
+ return true;
+}
+
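+/* IPv6 counterpart: only TCP/UDP immediately following the IPv6 header
+ * is handled (no extension headers), and hop_limit <= 1 is rejected.
+ */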
+static bool
+tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
+ struct flow_offload_tuple *tuple,
+ struct tcphdr **tcph)
+{
+ struct flow_ports *ports;
+ struct ipv6hdr *ip6h;
+ unsigned int thoff;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
+ return false;
+
+ ip6h = ipv6_hdr(skb);
+
+ if (ip6h->nexthdr != IPPROTO_TCP &&
+ ip6h->nexthdr != IPPROTO_UDP)
+ return false;
+
+ if (ip6h->hop_limit <= 1)
+ return false;
+
+ thoff = sizeof(*ip6h);
+ if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
+ thoff + sizeof(struct tcphdr) :
+ thoff + sizeof(*ports)))
+ return false;
+
+ ip6h = ipv6_hdr(skb);
+ if (ip6h->nexthdr == IPPROTO_TCP)
+ *tcph = (void *)(skb_network_header(skb) + thoff);
+
+ ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+ tuple->src_v6 = ip6h->saddr;
+ tuple->dst_v6 = ip6h->daddr;
+ tuple->src_port = ports->source;
+ tuple->dst_port = ports->dest;
+ tuple->l3proto = AF_INET6;
+ tuple->l4proto = ip6h->nexthdr;
+
+ return true;
+}
+
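+/* Fast path lookup: match the skb against the zone's offloaded flows.
+ * On a hit, the flow timeout is refreshed and the conntrack entry is
+ * attached to the skb, letting tcf_ct_act() skip the nf conntrack
+ * lookup; TCP FIN/RST tears the offloaded flow down instead.
+ */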
+static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
+ struct sk_buff *skb,
+ u8 family)
+{
+ struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
+ struct flow_offload_tuple_rhash *tuplehash;
+ struct flow_offload_tuple tuple = {};
+ enum ip_conntrack_info ctinfo;
+ struct tcphdr *tcph = NULL;
+ struct flow_offload *flow;
+ struct nf_conn *ct;
+ u8 dir;
+
+ /* Previously seen or loopback */
+ ct = nf_ct_get(skb, &ctinfo);
+ if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
+ return false;
+
+ switch (family) {
+ case NFPROTO_IPV4:
+ if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
+ return false;
+ break;
+ case NFPROTO_IPV6:
+ if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ tuplehash = flow_offload_lookup(nf_ft, &tuple);
+ if (!tuplehash)
+ return false;
+
+ dir = tuplehash->tuple.dir;
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+ ct = flow->ct;
+
+ if (tcph && (unlikely(tcph->fin || tcph->rst))) {
+ flow_offload_teardown(flow);
+ return false;
+ }
+
+ ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
+ IP_CT_ESTABLISHED_REPLY;
+
+ flow_offload_refresh(nf_ft, flow);
+ nf_conntrack_get(&ct->ct_general);
+ nf_ct_set(skb, ct, ctinfo);
+
+ return true;
+}
+
+static int tcf_ct_flow_tables_init(void)
+{
+ return rhashtable_init(&zones_ht, &zones_params);
+}
+
+static void tcf_ct_flow_tables_uninit(void)
+{
+ rhashtable_destroy(&zones_ht);
+}
+
static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;
@@ -207,6 +726,8 @@ static void tcf_ct_params_free(struct rcu_head *head)
struct tcf_ct_params *params = container_of(head,
struct tcf_ct_params, rcu);
+ tcf_ct_flow_table_put(params);
+
if (params->tmpl)
nf_conntrack_put(&params->tmpl->ct_general);
kfree(params);
@@ -387,6 +908,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
struct nf_hook_state state;
int nh_ofs, err, retval;
struct tcf_ct_params *p;
+ bool skip_add = false;
struct nf_conn *ct;
u8 family;
@@ -436,6 +958,11 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
*/
cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
if (!cached) {
+ if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
+ skip_add = true;
+ goto do_nat;
+ }
+
/* Associate skb with specified zone. */
if (tmpl) {
ct = nf_ct_get(skb, &ctinfo);
@@ -453,6 +980,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
goto out_push;
}
+do_nat:
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
goto out_push;
@@ -470,6 +998,8 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
* even if the connection is already confirmed.
*/
nf_conntrack_confirm(skb);
+ } else if (!skip_add) {
+ tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
}
out_push:
@@ -730,6 +1260,10 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
if (err)
goto cleanup;
+ err = tcf_ct_flow_table_get(params);
+ if (err)
+ goto cleanup;
+
spin_lock_bh(&c->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
params = rcu_replace_pointer(c->params, params,
@@ -974,13 +1508,46 @@ static struct pernet_operations ct_net_ops = {
static int __init ct_init_module(void)
{
- return tcf_register_action(&act_ct_ops, &ct_net_ops);
+ int err;
+
+ act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
+ if (!act_ct_wq)
+ return -ENOMEM;
+
+ err = tcf_ct_flow_tables_init();
+ if (err)
+ goto err_tbl_init;
+
+ err = tcf_register_action(&act_ct_ops, &ct_net_ops);
+ if (err)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ tcf_ct_flow_tables_uninit();
+err_tbl_init:
+ destroy_workqueue(act_ct_wq);
+ return err;
}
static void __exit ct_cleanup_module(void)
{
tcf_unregister_action(&act_ct_ops, &ct_net_ops);
+ tcf_ct_flow_tables_uninit();
+ destroy_workqueue(act_ct_wq);
+}
+
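+/* Reverse of tcf_ct_flow_table_add_action_meta()'s cookie encoding:
+ * recover the ct pointer and ctinfo and attach them to the skb. Meant
+ * for drivers handing an offloaded flow's packet back to software.
+ */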
+void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie)
+{
+ enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK;
+ struct nf_conn *ct;
+
+ ct = (struct nf_conn *)(cookie & NFCT_PTRMASK);
+ nf_conntrack_get(&ct->ct_general);
+ nf_ct_set(skb, ct, ctinfo);
}
+EXPORT_SYMBOL_GPL(tcf_ct_flow_table_restore_skb);
module_init(ct_init_module);
module_exit(ct_cleanup_module);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 4e766c5ab77a..2046102a763e 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -22,6 +22,7 @@
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
+#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
@@ -354,7 +355,7 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (!chain)
return NULL;
- list_add_tail(&chain->list, &block->chain_list);
+ list_add_tail_rcu(&chain->list, &block->chain_list);
mutex_init(&chain->filter_chain_lock);
chain->block = block;
chain->index = chain_index;
@@ -394,7 +395,7 @@ static bool tcf_chain_detach(struct tcf_chain *chain)
ASSERT_BLOCK_LOCKED(block);
- list_del(&chain->list);
+ list_del_rcu(&chain->list);
if (!chain->index)
block->chain0.chain = NULL;
@@ -453,6 +454,20 @@ static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
return NULL;
}
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
+ u32 chain_index)
+{
+ struct tcf_chain *chain;
+
+ list_for_each_entry_rcu(chain, &block->chain_list, list) {
+ if (chain->index == chain_index)
+ return chain;
+ }
+ return NULL;
+}
+#endif
+
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
u32 seq, u16 flags, int event, bool unicast);
@@ -1559,12 +1574,15 @@ static int tcf_block_setup(struct tcf_block *block,
* to this qdisc, (optionally) tests for protocol and asks
* specific classifiers.
*/
-int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res, bool compat_mode)
+static inline int __tcf_classify(struct sk_buff *skb,
+ const struct tcf_proto *tp,
+ const struct tcf_proto *orig_tp,
+ struct tcf_result *res,
+ bool compat_mode,
+ u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
const int max_reclassify_loop = 4;
- const struct tcf_proto *orig_tp = tp;
const struct tcf_proto *first_tp;
int limit = 0;
@@ -1582,21 +1600,11 @@ reclassify:
#ifdef CONFIG_NET_CLS_ACT
if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
first_tp = orig_tp;
+ *last_executed_chain = first_tp->chain->index;
goto reset;
} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
first_tp = res->goto_tp;
-
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- {
- struct tc_skb_ext *ext;
-
- ext = skb_ext_add(skb, TC_SKB_EXT);
- if (WARN_ON_ONCE(!ext))
- return TC_ACT_SHOT;
-
- ext->chain = err & TC_ACT_EXT_VAL_MASK;
- }
-#endif
+ *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
goto reset;
}
#endif
@@ -1619,8 +1627,64 @@ reset:
goto reclassify;
#endif
}
+
+int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
+{
+ u32 last_executed_chain = 0;
+
+ return __tcf_classify(skb, tp, tp, res, compat_mode,
+ &last_executed_chain);
+}
EXPORT_SYMBOL(tcf_classify);
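+
+/* Ingress-only variant of tcf_classify(). When TC skb extensions are
+ * enabled, a chain index recorded by an earlier pass (e.g. after a
+ * hardware miss) is looked up on the ingress block and classification
+ * resumes from that chain; on a miss, the last executed chain is
+ * stored back in the extension for the next round.
+ */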
+int tcf_classify_ingress(struct sk_buff *skb,
+ const struct tcf_block *ingress_block,
+ const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
+{
+#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 last_executed_chain = 0;
+
+ return __tcf_classify(skb, tp, tp, res, compat_mode,
+ &last_executed_chain);
+#else
+ u32 last_executed_chain = tp ? tp->chain->index : 0;
+ const struct tcf_proto *orig_tp = tp;
+ struct tc_skb_ext *ext;
+ int ret;
+
+ ext = skb_ext_find(skb, TC_SKB_EXT);
+
+ if (ext && ext->chain) {
+ struct tcf_chain *fchain;
+
+ fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
+ if (!fchain)
+ return TC_ACT_SHOT;
+
+ /* Consume, so cloned/redirect skbs won't inherit ext */
+ skb_ext_del(skb, TC_SKB_EXT);
+
+ tp = rcu_dereference_bh(fchain->filter_chain);
+ }
+
+ ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
+ &last_executed_chain);
+
+ /* If we missed on some chain */
+ if (ret == TC_ACT_UNSPEC && last_executed_chain) {
+ ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (WARN_ON_ONCE(!ext))
+ return TC_ACT_SHOT;
+ ext->chain = last_executed_chain;
+ }
+
+ return ret;
+#endif
+}
+EXPORT_SYMBOL(tcf_classify_ingress);
+
struct tcf_chain_info {
struct tcf_proto __rcu **pprev;
struct tcf_proto __rcu *next;
@@ -3464,6 +3528,10 @@ int tc_setup_flow_action(struct flow_action *flow_action,
struct tc_action *act;
int i, j, k, err = 0;
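+ /* The uapi TCA_ACT_HW_STATS_TYPE_* bits are copied verbatim into
+  * flow_action entries below, so the two enum spaces must stay in
+  * sync.
+  */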
+ BUILD_BUG_ON(TCA_ACT_HW_STATS_TYPE_ANY != FLOW_ACTION_HW_STATS_TYPE_ANY);
+ BUILD_BUG_ON(TCA_ACT_HW_STATS_TYPE_IMMEDIATE != FLOW_ACTION_HW_STATS_TYPE_IMMEDIATE);
+ BUILD_BUG_ON(TCA_ACT_HW_STATS_TYPE_DELAYED != FLOW_ACTION_HW_STATS_TYPE_DELAYED);
+
if (!exts)
return 0;
@@ -3476,6 +3544,9 @@ int tc_setup_flow_action(struct flow_action *flow_action,
err = tcf_act_get_cookie(entry, act);
if (err)
goto err_out_locked;
+
+ entry->hw_stats_type = act->hw_stats_type;
+
if (is_tcf_gact_ok(act)) {
entry->id = FLOW_ACTION_ACCEPT;
} else if (is_tcf_gact_shot(act)) {
@@ -3565,6 +3636,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
entry->id = FLOW_ACTION_CT;
entry->ct.action = tcf_ct_action(act);
entry->ct.zone = tcf_ct_zone(act);
+ entry->ct.flow_table = tcf_ct_ft(act);
} else if (is_tcf_mpls(act)) {
switch (tcf_mpls_action(act)) {
case TCA_MPLS_ACT_PUSH:
diff --git a/net/sched/em_ipt.c b/net/sched/em_ipt.c
index 9fff6480acc6..eecfe072c508 100644
--- a/net/sched/em_ipt.c
+++ b/net/sched/em_ipt.c
@@ -22,7 +22,7 @@ struct em_ipt_match {
const struct xt_match *match;
u32 hook;
u8 nfproto;
- u8 match_data[0] __aligned(8);
+ u8 match_data[] __aligned(8);
};
struct em_ipt_xt_match {
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 88c7ce42df7e..2c1192a2ee5e 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -16,7 +16,7 @@
struct nbyte_data {
struct tcf_em_nbyte hdr;
- char pattern[0];
+ char pattern[];
};
static int em_nbyte_change(struct net *net, void *data, int data_len,
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index f4f9b8cdbffb..ee12ca9f55b4 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -58,7 +58,7 @@ struct atm_flow_data {
struct atm_flow_data *excess; /* flow for excess traffic;
NULL to set CLP instead */
int hdr_len;
- unsigned char hdr[0]; /* header data; MUST BE LAST */
+ unsigned char hdr[]; /* header data; MUST BE LAST */
};
struct atm_qdisc_data {
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 37c8aa75d70c..a579a4131d22 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
/* 1 band FIFO pseudo-"scheduler" */
@@ -51,8 +52,49 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_CN;
}
-static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
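+/* Best-effort offload of the FIFO parameters: the return value of
+ * ndo_setup_tc() is ignored here, since the software qdisc keeps
+ * working whether or not the driver accepts the offload.
+ */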
+static void fifo_offload_init(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct tc_fifo_qopt_offload qopt;
+
+ if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+ return;
+
+ qopt.command = TC_FIFO_REPLACE;
+ qopt.handle = sch->handle;
+ qopt.parent = sch->parent;
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
+}
+
+static void fifo_offload_destroy(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct tc_fifo_qopt_offload qopt;
+
+ if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+ return;
+
+ qopt.command = TC_FIFO_DESTROY;
+ qopt.handle = sch->handle;
+ qopt.parent = sch->parent;
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
+}
+
+static int fifo_offload_dump(struct Qdisc *sch)
+{
+ struct tc_fifo_qopt_offload qopt;
+
+ qopt.command = TC_FIFO_STATS;
+ qopt.handle = sch->handle;
+ qopt.parent = sch->parent;
+ qopt.stats.bstats = &sch->bstats;
+ qopt.stats.qstats = &sch->qstats;
+
+ return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
+}
+
+static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
bool bypass;
bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
@@ -82,10 +124,35 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
sch->flags |= TCQ_F_CAN_BYPASS;
else
sch->flags &= ~TCQ_F_CAN_BYPASS;
+
return 0;
}
-static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
+static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = __fifo_init(sch, opt, extack);
+ if (err)
+ return err;
+
+ fifo_offload_init(sch);
+ return 0;
+}
+
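+/* pfifo_head_drop stays software-only: head-drop semantics are not
+ * covered by the FIFO offload, so its init/dump skip the offload
+ * hooks.
+ */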
+static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ return __fifo_init(sch, opt, extack);
+}
+
+static void fifo_destroy(struct Qdisc *sch)
+{
+ fifo_offload_destroy(sch);
+}
+
+static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct tc_fifo_qopt opt = { .limit = sch->limit };
@@ -97,6 +164,22 @@ nla_put_failure:
return -1;
}
+static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ int err;
+
+ err = fifo_offload_dump(sch);
+ if (err)
+ return err;
+
+ return __fifo_dump(sch, skb);
+}
+
+static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ return __fifo_dump(sch, skb);
+}
+
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
.id = "pfifo",
.priv_size = 0,
@@ -104,6 +187,7 @@ struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
.init = fifo_init,
+ .destroy = fifo_destroy,
.reset = qdisc_reset_queue,
.change = fifo_init,
.dump = fifo_dump,
@@ -118,6 +202,7 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
.init = fifo_init,
+ .destroy = fifo_destroy,
.reset = qdisc_reset_queue,
.change = fifo_init,
.dump = fifo_dump,
@@ -131,10 +216,10 @@ struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
.enqueue = pfifo_tail_enqueue,
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
- .init = fifo_init,
+ .init = fifo_hd_init,
.reset = qdisc_reset_queue,
- .change = fifo_init,
- .dump = fifo_dump,
+ .change = fifo_hd_init,
+ .dump = fifo_hd_dump,
.owner = THIS_MODULE,
};
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a5a295477ecc..371ad84def3b 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -744,6 +744,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
[TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
[TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
+ [TCA_FQ_ORPHAN_MASK] = { .type = NLA_U32 },
[TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
[TCA_FQ_CE_THRESHOLD] = { .type = NLA_U32 },
};
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 214657eb3dfd..a9da8776bf5b 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -189,7 +189,6 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
out:
q->stats.dropped++;
sel_flow->vars.accu_prob = 0;
- sel_flow->vars.accu_prob_overflows = 0;
__qdisc_drop(skb, to_free);
qdisc_qstats_drop(sch);
return NET_XMIT_CN;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6c9595f1048a..2efd5b61acef 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1391,6 +1391,14 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);
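+
+/* Let both mini qdiscs of a pair reference the ingress block, so the
+ * RX fast path can hand the block to tcf_classify_ingress() without
+ * dereferencing the full qdisc.
+ */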
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+ struct tcf_block *block)
+{
+ miniqp->miniq1.block = block;
+ miniqp->miniq2.block = block;
+}
+EXPORT_SYMBOL(mini_qdisc_pair_block_init);
+
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq)
{
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index bf56aa519797..84838128b9c5 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -78,6 +78,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
{
struct ingress_sched_data *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
+ int err;
net_inc_ingress_queue();
@@ -87,7 +88,13 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
q->block_info.chain_head_change = clsact_chain_head_change;
q->block_info.chain_head_change_priv = &q->miniqp;
- return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
+ err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
+ if (err)
+ return err;
+
+ mini_qdisc_pair_block_init(&q->miniqp, q->block);
+
+ return 0;
}
static void ingress_destroy(struct Qdisc *sch)
@@ -226,6 +233,8 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
if (err)
return err;
+ mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block);
+
mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);
q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 42e557d48e4e..84f82771cdf5 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -66,7 +66,7 @@
struct disttable {
u32 size;
- s16 table[0];
+ s16 table[];
};
struct netem_sched_data {
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 915bcdb59a9f..c65077f0c0f3 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -31,7 +31,7 @@ struct pie_sched_data {
};
bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
- struct pie_vars *vars, u32 qlen, u32 packet_size)
+ struct pie_vars *vars, u32 backlog, u32 packet_size)
{
u64 rnd;
u64 local_prob = vars->prob;
@@ -51,7 +51,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
/* If we have fewer than 2 mtu-sized packets, disable pie_drop_early,
* similar to min_th in RED
*/
- if (qlen < 2 * mtu)
+ if (backlog < 2 * mtu)
return false;
/* If bytemode is turned on, use packet size to compute new
@@ -62,27 +62,19 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
else
local_prob = vars->prob;
- if (local_prob == 0) {
+ if (local_prob == 0)
vars->accu_prob = 0;
- vars->accu_prob_overflows = 0;
- }
-
- if (local_prob > MAX_PROB - vars->accu_prob)
- vars->accu_prob_overflows++;
-
- vars->accu_prob += local_prob;
+ else
+ vars->accu_prob += local_prob;
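+ /* accu_prob now has BITS_PER_BYTE bits of headroom (MAX_PROB is
+  * scaled down accordingly), so values up to 8.5 * MAX_PROB fit
+  * directly and the separate overflow counter can go away.
+  */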
- if (vars->accu_prob_overflows == 0 &&
- vars->accu_prob < (MAX_PROB / 100) * 85)
+ if (vars->accu_prob < (MAX_PROB / 100) * 85)
return false;
- if (vars->accu_prob_overflows == 8 &&
- vars->accu_prob >= MAX_PROB / 2)
+ if (vars->accu_prob >= (MAX_PROB / 2) * 17)
return true;
prandom_bytes(&rnd, 8);
- if (rnd < local_prob) {
+ if ((rnd >> BITS_PER_BYTE) < local_prob) {
vars->accu_prob = 0;
- vars->accu_prob_overflows = 0;
return true;
}
@@ -129,7 +121,6 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
out:
q->stats.dropped++;
q->vars.accu_prob = 0;
- q->vars.accu_prob_overflows = 0;
return qdisc_drop(skb, sch, to_free);
}
@@ -215,7 +206,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
}
void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
- struct pie_vars *vars, u32 qlen)
+ struct pie_vars *vars, u32 backlog)
{
psched_time_t now = psched_get_time();
u32 dtime = 0;
@@ -231,7 +222,7 @@ void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
vars->dq_tstamp = now;
- if (qlen == 0)
+ if (backlog == 0)
vars->qdelay = 0;
if (dtime == 0)
@@ -244,7 +235,7 @@ void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
* we have enough packets to calculate the drain rate. Save
* current time as dq_tstamp and start measurement cycle.
*/
- if (qlen >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
+ if (backlog >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
vars->dq_tstamp = psched_get_time();
vars->dq_count = 0;
}
@@ -283,7 +274,7 @@ void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
* dq_count to 0 to re-enter the if block when the next
* packet is dequeued
*/
- if (qlen < QUEUE_THRESHOLD) {
+ if (backlog < QUEUE_THRESHOLD) {
vars->dq_count = DQCOUNT_INVALID;
} else {
vars->dq_count = 0;
@@ -307,7 +298,7 @@ burst_allowance_reduction:
EXPORT_SYMBOL_GPL(pie_process_dequeue);
void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
- u32 qlen)
+ u32 backlog)
{
psched_time_t qdelay = 0; /* in pschedtime */
psched_time_t qdelay_old = 0; /* in pschedtime */
@@ -322,7 +313,7 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
vars->qdelay_old = vars->qdelay;
if (vars->avg_dq_rate > 0)
- qdelay = (qlen << PIE_SCALE) / vars->avg_dq_rate;
+ qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate;
else
qdelay = 0;
} else {
@@ -330,10 +321,10 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
qdelay_old = vars->qdelay_old;
}
- /* If qdelay is zero and qlen is not, it means qlen is very small,
+ /* If qdelay is zero and backlog is not, it means backlog is very small,
* so we do not update probability in this round.
*/
- if (qdelay == 0 && qlen != 0)
+ if (qdelay == 0 && backlog != 0)
update_prob = false;
/* In the algorithm, alpha and beta are between 0 and 2 with typical
@@ -363,8 +354,8 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
}
/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
- delta += alpha * (u64)(qdelay - params->target);
- delta += beta * (u64)(qdelay - qdelay_old);
+ delta += alpha * (qdelay - params->target);
+ delta += beta * (qdelay - qdelay_old);
oldprob = vars->prob;
@@ -409,7 +400,7 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
vars->prob -= vars->prob / 64;
vars->qdelay = qdelay;
- vars->qlen_old = qlen;
+ vars->backlog_old = backlog;
/* We restart the measurement cycle if the following conditions are met
* 1. If the delay has been low for 2 consecutive Tupdate periods
@@ -502,7 +493,7 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct pie_sched_data *q = qdisc_priv(sch);
struct tc_pie_xstats st = {
- .prob = q->vars.prob,
+ .prob = q->vars.prob << BITS_PER_BYTE,
.delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
NSEC_PER_USEC,
.packets_in = q->stats.packets_in,
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 660fc45ee40f..b1eb12d33b9a 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -564,8 +564,10 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
prio = skb->priority;
tc = netdev_get_prio_tc_map(dev, prio);
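+ /* Clear skb before skipping this class: otherwise the packet
+  * peeked above could be returned to the caller without ever
+  * having been dequeued from the child qdisc.
+  */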
- if (!(gate_mask & BIT(tc)))
+ if (!(gate_mask & BIT(tc))) {
+ skb = NULL;
continue;
+ }
len = qdisc_pkt_len(skb);
guard = ktime_add_ns(taprio_get_time(q),
@@ -575,13 +577,17 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
* guard band ...
*/
if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
- ktime_after(guard, entry->close_time))
+ ktime_after(guard, entry->close_time)) {
+ skb = NULL;
continue;
+ }
/* ... and no budget. */
if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
- atomic_sub_return(len, &entry->budget) < 0)
+ atomic_sub_return(len, &entry->budget) < 0) {
+ skb = NULL;
continue;
+ }
skb = child->ops->dequeue(child);
if (unlikely(!skb))
@@ -768,6 +774,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
[TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
+ [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
};
static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 8a15146faaeb..493fc01e5d2b 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -237,15 +237,11 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
addrcnt++;
return nla_total_size(sizeof(struct sctp_info))
- + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
- + nla_total_size(1) /* INET_DIAG_TOS */
- + nla_total_size(1) /* INET_DIAG_TCLASS */
- + nla_total_size(4) /* INET_DIAG_MARK */
- + nla_total_size(4) /* INET_DIAG_CLASS_ID */
+ nla_total_size(addrlen * asoc->peer.transport_count)
+ nla_total_size(addrlen * addrcnt)
- + nla_total_size(sizeof(struct inet_diag_meminfo))
+ nla_total_size(sizeof(struct inet_diag_msg))
+ + inet_diag_msg_attrs_size()
+ + nla_total_size(sizeof(struct inet_diag_meminfo))
+ 64;
}
@@ -432,11 +428,12 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
}
-static int sctp_diag_dump_one(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+static int sctp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
+ struct sk_buff *in_skb = cb->skb;
struct net *net = sock_net(in_skb->sk);
+ const struct nlmsghdr *nlh = cb->nlh;
union sctp_addr laddr, paddr;
struct sctp_comm_param commp = {
.skb = in_skb,
@@ -470,7 +467,7 @@ static int sctp_diag_dump_one(struct sk_buff *in_skb,
}
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
u32 idiag_states = r->idiag_states;
struct net *net = sock_net(skb->sk);
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 9239cf881f21..04b6fefb8bce 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -600,6 +600,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
smc_smcr_terminate_all(smcibdev);
smc_ib_cleanup_per_ibdev(smcibdev);
ib_unregister_event_handler(&smcibdev->event_handler);
+ cancel_work_sync(&smcibdev->port_event_work);
kfree(smcibdev);
}
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 7c35094c20b8..bb9862410e68 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -116,6 +116,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
[TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
[TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
[TIPC_NLA_PROP_WIN] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_MTU] = { .type = NLA_U32 },
[TIPC_NLA_PROP_BROADCAST] = { .type = NLA_U32 },
[TIPC_NLA_PROP_BROADCAST_RATIO] = { .type = NLA_U32 }
};
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 9d0518d9bdd4..3385a7a0b231 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -690,7 +690,8 @@ static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
if (sk) {
u = unix_sk(sock->sk);
- seq_printf(m, "scm_fds: %u\n", READ_ONCE(u->scm_stat.nr_fds));
+ seq_printf(m, "scm_fds: %u\n",
+ atomic_read(&u->scm_stat.nr_fds));
}
}
#else
@@ -1602,10 +1603,8 @@ static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
struct scm_fp_list *fp = UNIXCB(skb).fp;
struct unix_sock *u = unix_sk(sk);
- lockdep_assert_held(&sk->sk_receive_queue.lock);
-
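+ /* nr_fds is atomic_t now, so callers need not hold the receive
+  * queue lock around this accounting.
+  */
+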
if (unlikely(fp && fp->count))
- u->scm_stat.nr_fds += fp->count;
+ atomic_add(fp->count, &u->scm_stat.nr_fds);
}
static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
@@ -1613,10 +1612,8 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
struct scm_fp_list *fp = UNIXCB(skb).fp;
struct unix_sock *u = unix_sk(sk);
- lockdep_assert_held(&sk->sk_receive_queue.lock);
-
if (unlikely(fp && fp->count))
- u->scm_stat.nr_fds -= fp->count;
+ atomic_sub(fp->count, &u->scm_stat.nr_fds);
}
/*
@@ -1805,10 +1802,8 @@ restart_locked:
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
maybe_add_creds(skb, sock, other);
- spin_lock(&other->sk_receive_queue.lock);
scm_stat_add(other, skb);
- __skb_queue_tail(&other->sk_receive_queue, skb);
- spin_unlock(&other->sk_receive_queue.lock);
+ skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other);
sock_put(other);
@@ -1910,10 +1905,8 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
goto pipe_err_free;
maybe_add_creds(skb, sock, other);
- spin_lock(&other->sk_receive_queue.lock);
scm_stat_add(other, skb);
- __skb_queue_tail(&other->sk_receive_queue, skb);
- spin_unlock(&other->sk_receive_queue.lock);
+ skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other);
sent += size;
@@ -2113,9 +2106,12 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
skip = sk_peek_offset(sk, flags);
skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
- scm_stat_del, &skip, &err, &last);
- if (skb)
+ &skip, &err, &last);
+ if (skb) {
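+ /* The destructor argument is gone; drop the fd accounting
+  * here instead, but only when the skb actually leaves the
+  * queue (not on MSG_PEEK).
+  */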
+ if (!(flags & MSG_PEEK))
+ scm_stat_del(sk, skb);
break;
+ }
mutex_unlock(&u->iolock);
@@ -2409,9 +2405,7 @@ unlock:
sk_peek_offset_bwd(sk, chunk);
if (UNIXCB(skb).fp) {
- spin_lock(&sk->sk_receive_queue.lock);
scm_stat_del(sk, skb);
- spin_unlock(&sk->sk_receive_queue.lock);
unix_detach_fds(&scm, skb);
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 15000275b32d..750b73a52fd8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -493,6 +493,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_PLINK_STATE] =
NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1),
+ [NL80211_ATTR_MEASUREMENT_DURATION] = { .type = NLA_U16 },
+ [NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY] = { .type = NLA_FLAG },
[NL80211_ATTR_MESH_PEER_AID] =
NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
[NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
@@ -554,6 +556,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MDID] = { .type = NLA_U16 },
[NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 },
+ [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 },
[NL80211_ATTR_PEER_AID] =
NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
[NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
@@ -584,6 +588,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1),
[NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
[NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
+ [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 },
[NL80211_ATTR_MAC_MASK] = {
.type = NLA_EXACT_LEN_WARN,
.len = ETH_ALEN
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 89a01ac4e079..b50bb5c76da5 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -19,13 +19,13 @@ struct xdp_ring {
/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
struct xdp_ring ptrs;
- struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
+ struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};
/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
struct xdp_ring ptrs;
- u64 desc[0] ____cacheline_aligned_in_smp;
+ u64 desc[] ____cacheline_aligned_in_smp;
};
struct xsk_queue {
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index f15d6a564b0e..037ea156d2f9 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -100,7 +100,7 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
flags |= nonblock ? MSG_DONTWAIT : 0;
- skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, NULL, &off, &err);
+ skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err);
if (!skb)
return err;
diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py
index 90baf7d70911..cebed6fb5bbb 100755
--- a/scripts/bpf_helpers_doc.py
+++ b/scripts/bpf_helpers_doc.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright (C) 2018-2019 Netronome Systems, Inc.
diff --git a/scripts/parse-maintainers.pl b/scripts/parse-maintainers.pl
index 255cef1b098d..255cef1b098d 100644..100755
--- a/scripts/parse-maintainers.pl
+++ b/scripts/parse-maintainers.pl
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index 9f60a5037f8b..5bf1ea150f26 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -649,8 +649,6 @@ snd_sgio2audio_pcm_pointer(struct snd_pcm_substream *substream)
static const struct snd_pcm_ops snd_sgio2audio_playback1_ops = {
.open = snd_sgio2audio_playback1_open,
.close = snd_sgio2audio_pcm_close,
- .hw_params = snd_sgio2audio_pcm_hw_params,
- .hw_free = snd_sgio2audio_pcm_hw_free,
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
@@ -659,8 +657,6 @@ static const struct snd_pcm_ops snd_sgio2audio_playback1_ops = {
static const struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
.open = snd_sgio2audio_playback2_open,
.close = snd_sgio2audio_pcm_close,
- .hw_params = snd_sgio2audio_pcm_hw_params,
- .hw_free = snd_sgio2audio_pcm_hw_free,
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
@@ -669,8 +665,6 @@ static const struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
static const struct snd_pcm_ops snd_sgio2audio_capture_ops = {
.open = snd_sgio2audio_capture_open,
.close = snd_sgio2audio_pcm_close,
- .hw_params = snd_sgio2audio_pcm_hw_params,
- .hw_free = snd_sgio2audio_pcm_hw_free,
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 8c48864c844a..6567504665b9 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -271,13 +271,8 @@ static void snd_bt87x_free_risc(struct snd_bt87x *chip)
static void snd_bt87x_pci_error(struct snd_bt87x *chip, unsigned int status)
{
- u16 pci_status;
+ int pci_status = pci_status_get_and_clear_errors(chip->pci);
- pci_read_config_word(chip->pci, PCI_STATUS, &pci_status);
- pci_status &= PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY;
- pci_write_config_word(chip->pci, PCI_STATUS, pci_status);
if (pci_status != PCI_STATUS_DETECTED_PARITY)
dev_err(chip->card->dev,
"Aieee - PCI error! status %#08x, PCI status %#04x\n",
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 477589e7ec1d..0ac06ff1a17c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2447,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
@@ -5920,7 +5921,8 @@ enum {
ALC289_FIXUP_DUAL_SPK,
ALC294_FIXUP_SPK2_TO_DAC1,
ALC294_FIXUP_ASUS_DUAL_SPK,
-
+ ALC285_FIXUP_THINKPAD_HEADSET_JACK,
+ ALC294_FIXUP_ASUS_HPE,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6684,6 +6686,8 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC285_FIXUP_SPEAKER2_TO_DAC1] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc285_fixup_speaker2_to_dac1,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
},
[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
.type = HDA_FIXUP_PINS,
@@ -7040,7 +7044,23 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC294_FIXUP_SPK2_TO_DAC1
},
-
+ [ALC285_FIXUP_THINKPAD_HEADSET_JACK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_jack,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1
+ },
+ [ALC294_FIXUP_ASUS_HPE] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Set EAPD high */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7115,6 +7135,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -7204,6 +7226,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -7274,8 +7297,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
- SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
+ SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+ SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 7e90f5d83097..ea912439e446 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -1406,7 +1406,7 @@ config SND_SOC_WM8737
depends on SND_SOC_I2C_AND_SPI
config SND_SOC_WM8741
- tristate "Wolfson Microelectronics WM8737 DAC"
+ tristate "Wolfson Microelectronics WM8741 DAC"
depends on SND_SOC_I2C_AND_SPI
config SND_SOC_WM8750
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 861210f6bf4f..4cbef9affffd 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1564,13 +1564,15 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
}
pcm512x->sclk = devm_clk_get(dev, NULL);
- if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
if (!IS_ERR(pcm512x->sclk)) {
ret = clk_prepare_enable(pcm512x->sclk);
if (ret != 0) {
dev_err(dev, "Failed to enable SCLK: %d\n", ret);
- return ret;
+ goto err;
}
}
diff --git a/sound/soc/codecs/rt1015.c b/sound/soc/codecs/rt1015.c
index 6d490e2dbc25..66eb55b4ffd4 100644
--- a/sound/soc/codecs/rt1015.c
+++ b/sound/soc/codecs/rt1015.c
@@ -664,7 +664,7 @@ static int rt1015_hw_params(struct snd_pcm_substream *substream,
snd_soc_component_update_bits(component, RT1015_TDM_MASTER,
RT1015_I2S_DL_MASK, val_len);
snd_soc_component_update_bits(component, RT1015_CLK2,
- RT1015_FS_PD_MASK, pre_div);
+ RT1015_FS_PD_MASK, pre_div << RT1015_FS_PD_SFT);
return 0;
}
@@ -857,6 +857,7 @@ struct snd_soc_dai_driver rt1015_dai[] = {
.rates = RT1015_STEREO_RATES,
.formats = RT1015_FORMATS,
},
+ .ops = &rt1015_aif_dai_ops,
}
};
diff --git a/sound/soc/codecs/tas2562.c b/sound/soc/codecs/tas2562.c
index 729acd874c48..be52886a5edb 100644
--- a/sound/soc/codecs/tas2562.c
+++ b/sound/soc/codecs/tas2562.c
@@ -215,7 +215,8 @@ static int tas2562_set_bitwidth(struct tas2562_data *tas2562, int bitwidth)
break;
default:
- dev_info(tas2562->dev, "Not supported params format\n");
+ dev_info(tas2562->dev, "Unsupported bitwidth format\n");
+ return -EINVAL;
}
ret = snd_soc_component_update_bits(tas2562->component,
@@ -251,7 +252,7 @@ static int tas2562_hw_params(struct snd_pcm_substream *substream,
ret = tas2562_set_samplerate(tas2562, params_rate(params));
if (ret)
- dev_err(tas2562->dev, "set bitwidth failed, %d\n", ret);
+ dev_err(tas2562->dev, "set sample rate failed, %d\n", ret);
return ret;
}
diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
index 3466675f2678..a15aa2ffa681 100644
--- a/sound/soc/intel/skylake/skl-debug.c
+++ b/sound/soc/intel/skylake/skl-debug.c
@@ -34,8 +34,8 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
int i;
ssize_t ret = 0;
- for (i = 0; i < max_pin; i++)
- ret += snprintf(buf + size, MOD_BUF - size,
+ for (i = 0; i < max_pin; i++) {
+ ret += scnprintf(buf + size, MOD_BUF - size,
"%s %d\n\tModule %d\n\tInstance %d\n\t"
"In-used %s\n\tType %s\n"
"\tState %d\n\tIndex %d\n",
@@ -45,13 +45,15 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
m_pin[i].in_use ? "Used" : "Unused",
m_pin[i].is_dynamic ? "Dynamic" : "Static",
m_pin[i].pin_state, i);
+ size += ret;
+ }
return ret;
}
static ssize_t skl_print_fmt(struct skl_module_fmt *fmt, char *buf,
ssize_t size, bool direction)
{
- return snprintf(buf + size, MOD_BUF - size,
+ return scnprintf(buf + size, MOD_BUF - size,
"%s\n\tCh %d\n\tFreq %d\n\tBit depth %d\n\t"
"Valid bit depth %d\n\tCh config %#x\n\tInterleaving %d\n\t"
"Sample Type %d\n\tCh Map %#x\n",
@@ -75,16 +77,16 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- ret = snprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
+ ret = scnprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
"\tInstance id %d\n\tPvt_id %d\n", mconfig->guid,
mconfig->id.module_id, mconfig->id.instance_id,
mconfig->id.pvt_id);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Resources:\n\tCPC %#x\n\tIBS %#x\n\tOBS %#x\t\n",
res->cpc, res->ibs, res->obs);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Module data:\n\tCore %d\n\tIn queue %d\n\t"
"Out queue %d\n\tType %s\n",
mconfig->core_id, mconfig->max_in_queue,
@@ -94,38 +96,38 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
ret += skl_print_fmt(mconfig->in_fmt, buf, ret, true);
ret += skl_print_fmt(mconfig->out_fmt, buf, ret, false);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Fixup:\n\tParams %#x\n\tConverter %#x\n",
mconfig->params_fixup, mconfig->converter);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Module Gateway:\n\tType %#x\n\tVbus %#x\n\tHW conn %#x\n\tSlot %#x\n",
mconfig->dev_type, mconfig->vbus_id,
mconfig->hw_conn_type, mconfig->time_slot);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Pipeline:\n\tID %d\n\tPriority %d\n\tConn Type %d\n\t"
"Pages %#x\n", mconfig->pipe->ppl_id,
mconfig->pipe->pipe_priority, mconfig->pipe->conn_type,
mconfig->pipe->memory_pages);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tParams:\n\t\tHost DMA %d\n\t\tLink DMA %d\n",
mconfig->pipe->p_params->host_dma_id,
mconfig->pipe->p_params->link_dma_id);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tPCM params:\n\t\tCh %d\n\t\tFreq %d\n\t\tFormat %d\n",
mconfig->pipe->p_params->ch,
mconfig->pipe->p_params->s_freq,
mconfig->pipe->p_params->s_fmt);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tLink %#x\n\tStream %#x\n",
mconfig->pipe->p_params->linktype,
mconfig->pipe->p_params->stream);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tState %d\n\tPassthru %s\n",
mconfig->pipe->state,
mconfig->pipe->passthru ? "true" : "false");
@@ -135,7 +137,7 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
ret += skl_print_pins(mconfig->m_out_pin, buf,
mconfig->max_out_queue, ret, false);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Other:\n\tDomain %d\n\tHomogeneous Input %s\n\t"
"Homogeneous Output %s\n\tIn Queue Mask %d\n\t"
"Out Queue Mask %d\n\tDMA ID %d\n\tMem Pages %d\n\t"
@@ -191,7 +193,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
__ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
- ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
+ ret += scnprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
hex_dump_to_buffer(d->fw_read_buff + offset, 16, 16, 4,
tmp + ret, FW_REG_BUF - ret, 0);
ret += strlen(tmp + ret);
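The snprintf()-to-scnprintf() conversions above (and the matching ones in soc-pcm.c further down) fix a classic pattern: snprintf() returns the length that would have been written had the buffer been big enough, so accumulating its return value can push the running offset past the buffer and turn the size argument of the next call into a bogus huge value. scnprintf() returns the number of characters actually stored. A user-space sketch of the difference, with my_scnprintf() as a stand-in for the kernel helper:

#include <stdarg.h>
#include <stdio.h>

/* User-space stand-in for the kernel's scnprintf(): returns the number of
 * characters actually written, never more than size - 1. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int ret;

	if (size == 0)
		return 0;
	va_start(ap, fmt);
	ret = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	return ret >= (int)size ? (int)size - 1 : ret;
}

int main(void)
{
	char buf[8];
	int off = 0;

	/* snprintf() reports the would-be length: after one oversized write
	 * the offset (10) already points past the 8-byte buffer. */
	off += snprintf(buf, sizeof(buf), "0123456789");
	printf("snprintf offset: %d\n", off);

	/* scnprintf() reports what was stored, so the offset stays in bounds. */
	off = my_scnprintf(buf, sizeof(buf), "0123456789");
	printf("scnprintf offset: %d\n", off);
	return 0;
}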
diff --git a/sound/soc/intel/skylake/skl-ssp-clk.c b/sound/soc/intel/skylake/skl-ssp-clk.c
index 1c0e5226cb5b..bd43885f3805 100644
--- a/sound/soc/intel/skylake/skl-ssp-clk.c
+++ b/sound/soc/intel/skylake/skl-ssp-clk.c
@@ -384,9 +384,11 @@ static int skl_clk_dev_probe(struct platform_device *pdev)
&clks[i], clk_pdata, i);
if (IS_ERR(data->clk[data->avail_clk_cnt])) {
- ret = PTR_ERR(data->clk[data->avail_clk_cnt++]);
+ ret = PTR_ERR(data->clk[data->avail_clk_cnt]);
goto err_unreg_skl_clk;
}
+
+ data->avail_clk_cnt++;
}
platform_set_drvdata(pdev, data);
diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
index 9cfbd343a00c..8a0db28a6a40 100644
--- a/sound/soc/meson/g12a-tohdmitx.c
+++ b/sound/soc/meson/g12a-tohdmitx.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <sound/pcm_params.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>
@@ -378,6 +379,11 @@ static int g12a_tohdmitx_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
void __iomem *regs;
struct regmap *map;
+ int ret;
+
+ ret = device_reset(dev);
+ if (ret)
+ return ret;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
index 14e175cdeeb8..785a0385cc7f 100644
--- a/sound/soc/soc-component.c
+++ b/sound/soc/soc-component.c
@@ -451,7 +451,7 @@ int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream)
int i, ret;
for_each_rtd_components(rtd, i, component) {
- if (component->driver->ioctl) {
+ if (component->driver->sync_stop) {
ret = component->driver->sync_stop(component,
substream);
if (ret < 0)
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 223cd045719e..392a1c5b15d3 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -299,7 +299,7 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
for_each_dpcm_be(fe, stream, dpcm)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
- snd_soc_dapm_stream_stop(fe, stream);
+ dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 9b130561d562..9fb54e6fe254 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -4772,7 +4772,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm)
continue;
if (w->power) {
dapm_seq_insert(w, &down_list, false);
- w->power = 0;
+ w->new_power = 0;
powerdown = 1;
}
}
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index ff1b7c7078e5..2c59b3688ca0 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2006,7 +2006,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
soc_pcm_close(substream);
/* run the stream event for each BE */
- snd_soc_dapm_stream_stop(fe, stream);
+ dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
@@ -3171,16 +3171,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
unsigned long flags;
/* FE state */
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
"[%s - %s]\n", fe->dai_link->name,
stream ? "Capture" : "Playback");
- offset += snprintf(buf + offset, size - offset, "State: %s\n",
+ offset += scnprintf(buf + offset, size - offset, "State: %s\n",
dpcm_state_string(fe->dpcm[stream].state));
if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
(fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
"Hardware Params: "
"Format = %s, Channels = %d, Rate = %d\n",
snd_pcm_format_name(params_format(params)),
@@ -3188,10 +3188,10 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
params_rate(params));
/* BEs state */
- offset += snprintf(buf + offset, size - offset, "Backends:\n");
+ offset += scnprintf(buf + offset, size - offset, "Backends:\n");
if (list_empty(&fe->dpcm[stream].be_clients)) {
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
" No active DSP links\n");
goto out;
}
@@ -3201,16 +3201,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
struct snd_soc_pcm_runtime *be = dpcm->be;
params = &dpcm->hw_params;
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
"- %s\n", be->dai_link->name);
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
" State: %s\n",
dpcm_state_string(be->dpcm[stream].state));
if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
(be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
" Hardware Params: "
"Format = %s, Channels = %d, Rate = %d\n",
snd_pcm_format_name(params_format(params)),
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index d2ee6ad20e83..575da6aba807 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -2377,8 +2377,11 @@ static int soc_tplg_link_elems_load(struct soc_tplg *tplg,
}
ret = soc_tplg_link_config(tplg, _link);
- if (ret < 0)
+ if (ret < 0) {
+ if (!abi_match)
+ kfree(_link);
return ret;
+ }
/* offset by version-specific struct size and
* real priv data size
@@ -2542,7 +2545,7 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
{
struct snd_soc_tplg_manifest *manifest, *_manifest;
bool abi_match;
- int err;
+ int ret = 0;
if (tplg->pass != SOC_TPLG_PASS_MANIFEST)
return 0;
@@ -2555,19 +2558,19 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
_manifest = manifest;
} else {
abi_match = false;
- err = manifest_new_ver(tplg, manifest, &_manifest);
- if (err < 0)
- return err;
+ ret = manifest_new_ver(tplg, manifest, &_manifest);
+ if (ret < 0)
+ return ret;
}
/* pass control to component driver for optional further init */
if (tplg->comp && tplg->ops && tplg->ops->manifest)
- return tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
+ ret = tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
if (!abi_match) /* free the duplicated one */
kfree(_manifest);
- return 0;
+ return ret;
}
/* validate header magic, size and type */
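Both soc-topology fixes above enforce the same rule: once an object may be a duplicate (the !abi_match case), every exit path must free it, so the manifest callback's status is captured in ret rather than returned directly. A self-contained sketch of that shape; manifest_cb() is a hypothetical stand-in for tplg->ops->manifest().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the component driver's manifest callback. */
static int manifest_cb(const char *m) { return m[0] ? 0 : -1; }

static int load(int abi_match)
{
	const char *m = "v5";
	char *dup = NULL;
	int ret;

	if (!abi_match) {
		dup = strdup(m);	/* converted copy, as manifest_new_ver() makes */
		if (!dup)
			return -1;
		m = dup;
	}

	/* Capture the status instead of writing `return manifest_cb(m);`,
	 * so the duplicate is freed on this path too. */
	ret = manifest_cb(m);
	free(dup);			/* free(NULL) is a harmless no-op */
	return ret;
}

int main(void)
{
	return load(0) ? EXIT_FAILURE : EXIT_SUCCESS;
}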
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index b63fc529b456..78aa1da7c7a9 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -499,7 +499,7 @@ int snd_sof_ipc_stream_posn(struct snd_soc_component *scomp,
/* send IPC to the DSP */
err = sof_ipc_tx_message(sdev->ipc,
- stream.hdr.cmd, &stream, sizeof(stream), &posn,
+ stream.hdr.cmd, &stream, sizeof(stream), posn,
sizeof(*posn));
if (err < 0) {
dev_err(sdev->dev, "error: failed to get stream %d position\n",
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index 30bcd5d3a32a..10eb4b8e8e7e 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -1543,20 +1543,20 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_snd_soc_register_component(&pdev->dev, &stm32_component,
- &sai->cpu_dai_drv, 1);
+ ret = snd_dmaengine_pcm_register(&pdev->dev, conf, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register pcm dma\n");
+ return ret;
+ }
+
+ ret = snd_soc_register_component(&pdev->dev, &stm32_component,
+ &sai->cpu_dai_drv, 1);
if (ret)
return ret;
if (STM_SAI_PROTOCOL_IS_SPDIF(sai))
conf = &stm32_sai_pcm_config_spdif;
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, conf, 0);
- if (ret) {
- dev_err(&pdev->dev, "Could not register pcm dma\n");
- return ret;
- }
-
return 0;
}
@@ -1565,6 +1565,8 @@ static int stm32_sai_sub_remove(struct platform_device *pdev)
struct stm32_sai_sub_data *sai = dev_get_drvdata(&pdev->dev);
clk_unprepare(sai->pdata->pclk);
+ snd_dmaengine_pcm_unregister(&pdev->dev);
+ snd_soc_unregister_component(&pdev->dev);
return 0;
}
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index ebe1685e92dd..d5e517d1c3dd 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -512,6 +512,8 @@
#define MSR_K7_HWCR 0xc0010015
#define MSR_K7_HWCR_SMMLOCK_BIT 0
#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
+#define MSR_K7_HWCR_IRPERF_EN_BIT 30
+#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 503d3f42da16..3f3f780c8c65 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -390,6 +390,7 @@ struct kvm_sync_regs {
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS 0x00000004
+#define KVM_STATE_NESTED_MTF_PENDING 0x00000008
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
index 4d08f35034a2..b04156cfd7a3 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -19,19 +19,24 @@ SYNOPSIS
FEATURE COMMANDS
================
-| **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]]
+| **bpftool** **feature probe** [*COMPONENT*] [**full**] [**macros** [**prefix** *PREFIX*]]
| **bpftool** **feature help**
|
| *COMPONENT* := { **kernel** | **dev** *NAME* }
DESCRIPTION
===========
- **bpftool feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]]
+ **bpftool feature probe** [**kernel**] [**full**] [**macros** [**prefix** *PREFIX*]]
Probe the running kernel and dump a number of eBPF-related
parameters, such as availability of the **bpf()** system call,
JIT status, eBPF program types availability, eBPF helper
functions availability, and more.
+ By default, bpftool **does not run probes** for
+ **bpf_probe_write_user**\ () and **bpf_trace_printk**\ ()
+ helpers, which print warnings to kernel logs. To enable them
+ and run all probes, use the **full** keyword.
+
If the **macros** keyword (but not the **-j** option) is
passed, a subset of the output is dumped as a list of
**#define** macros that are ready to be included in a C
@@ -44,16 +49,12 @@ DESCRIPTION
Keyword **kernel** can be omitted. If no probe target is
specified, probing the kernel is the default behaviour.
- Note that when probed, some eBPF helpers (e.g.
- **bpf_trace_printk**\ () or **bpf_probe_write_user**\ ()) may
- print warnings to kernel logs.
-
- **bpftool feature probe dev** *NAME* [**macros** [**prefix** *PREFIX*]]
+ **bpftool feature probe dev** *NAME* [**full**] [**macros** [**prefix** *PREFIX*]]
Probe network device for supported eBPF features and dump
results to the console.
- The two keywords **macros** and **prefix** have the same
- role as when probing the kernel.
+ The keywords **full**, **macros** and **prefix** have the
+ same role as when probing the kernel.
**bpftool feature help**
Print short help message.
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 64ddf8a4c518..46862e85fed2 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -42,7 +42,8 @@ PROG COMMANDS
| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** |
-| **cgroup/getsockopt** | **cgroup/setsockopt**
+| **cgroup/getsockopt** | **cgroup/setsockopt** |
+| **struct_ops** | **fentry** | **fexit** | **freplace**
| }
| *ATTACH_TYPE* := {
| **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 754d8395e451..f2838a658339 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -469,7 +469,8 @@ _bpftool()
cgroup/recvmsg4 cgroup/recvmsg6 \
cgroup/post_bind4 cgroup/post_bind6 \
cgroup/sysctl cgroup/getsockopt \
- cgroup/setsockopt" -- \
+ cgroup/setsockopt struct_ops \
+ fentry fexit freplace" -- \
"$cur" ) )
return 0
;;
@@ -983,11 +984,12 @@ _bpftool()
probe)
[[ $prev == "prefix" ]] && return 0
if _bpftool_search_list 'macros'; then
- COMPREPLY+=( $( compgen -W 'prefix' -- "$cur" ) )
+ _bpftool_once_attr 'prefix'
else
COMPREPLY+=( $( compgen -W 'macros' -- "$cur" ) )
fi
_bpftool_one_of_list 'kernel dev'
+ _bpftool_once_attr 'full'
return 0
;;
*)
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 941873d778d8..88718ee6a438 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -112,18 +112,12 @@ print_start_section(const char *json_title, const char *plain_title,
}
}
-static void
-print_end_then_start_section(const char *json_title, const char *plain_title,
- const char *define_comment,
- const char *define_prefix)
+static void print_end_section(void)
{
if (json_output)
jsonw_end_object(json_wtr);
else
printf("\n");
-
- print_start_section(json_title, plain_title, define_comment,
- define_prefix);
}
/* Probing functions */
@@ -520,13 +514,38 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
}
static void
+probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
+ const char *define_prefix, unsigned int id,
+ const char *ptype_name, __u32 ifindex)
+{
+ bool res;
+
+ if (!supported_type)
+ res = false;
+ else
+ res = bpf_probe_helper(id, prog_type, ifindex);
+
+ if (json_output) {
+ if (res)
+ jsonw_string(json_wtr, helper_name[id]);
+ } else if (define_prefix) {
+ printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
+ define_prefix, ptype_name, helper_name[id],
+ res ? "1" : "0");
+ } else {
+ if (res)
+ printf("\n\t- %s", helper_name[id]);
+ }
+}
+
+static void
probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
- const char *define_prefix, __u32 ifindex)
+ const char *define_prefix, bool full_mode,
+ __u32 ifindex)
{
const char *ptype_name = prog_type_name[prog_type];
char feat_name[128];
unsigned int id;
- bool res;
if (ifindex)
/* Only test helpers for offload-able program types */
@@ -548,21 +567,19 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
}
for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
- if (!supported_type)
- res = false;
- else
- res = bpf_probe_helper(id, prog_type, ifindex);
-
- if (json_output) {
- if (res)
- jsonw_string(json_wtr, helper_name[id]);
- } else if (define_prefix) {
- printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
- define_prefix, ptype_name, helper_name[id],
- res ? "1" : "0");
- } else {
- if (res)
- printf("\n\t- %s", helper_name[id]);
+ /* Skip helper functions which emit dmesg messages when not in
+ * full mode.
+ */
+ switch (id) {
+ case BPF_FUNC_trace_printk:
+ case BPF_FUNC_probe_write_user:
+ if (!full_mode)
+ continue;
+ /* fallthrough */
+ default:
+ probe_helper_for_progtype(prog_type, supported_type,
+ define_prefix, id, ptype_name,
+ ifindex);
}
}
@@ -584,13 +601,132 @@ probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
res, define_prefix);
}
+static void
+section_system_config(enum probe_component target, const char *define_prefix)
+{
+ switch (target) {
+ case COMPONENT_KERNEL:
+ case COMPONENT_UNSPEC:
+ if (define_prefix)
+ break;
+
+ print_start_section("system_config",
+ "Scanning system configuration...",
+ NULL, /* define_comment never used here */
+ NULL); /* define_prefix always NULL here */
+ if (check_procfs()) {
+ probe_unprivileged_disabled();
+ probe_jit_enable();
+ probe_jit_harden();
+ probe_jit_kallsyms();
+ probe_jit_limit();
+ } else {
+ p_info("/* procfs not mounted, skipping related probes */");
+ }
+ probe_kernel_image_config();
+ print_end_section();
+ break;
+ default:
+ break;
+ }
+}
+
+static bool section_syscall_config(const char *define_prefix)
+{
+ bool res;
+
+ print_start_section("syscall_config",
+ "Scanning system call availability...",
+ "/*** System call availability ***/",
+ define_prefix);
+ res = probe_bpf_syscall(define_prefix);
+ print_end_section();
+
+ return res;
+}
+
+static void
+section_program_types(bool *supported_types, const char *define_prefix,
+ __u32 ifindex)
+{
+ unsigned int i;
+
+ print_start_section("program_types",
+ "Scanning eBPF program types...",
+ "/*** eBPF program types ***/",
+ define_prefix);
+
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_prog_type(i, supported_types, define_prefix, ifindex);
+
+ print_end_section();
+}
+
+static void section_map_types(const char *define_prefix, __u32 ifindex)
+{
+ unsigned int i;
+
+ print_start_section("map_types",
+ "Scanning eBPF map types...",
+ "/*** eBPF map types ***/",
+ define_prefix);
+
+ for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
+ probe_map_type(i, define_prefix, ifindex);
+
+ print_end_section();
+}
+
+static void
+section_helpers(bool *supported_types, const char *define_prefix,
+ bool full_mode, __u32 ifindex)
+{
+ unsigned int i;
+
+ print_start_section("helpers",
+ "Scanning eBPF helper functions...",
+ "/*** eBPF helper functions ***/",
+ define_prefix);
+
+ if (define_prefix)
+ printf("/*\n"
+ " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
+ " * to determine if <helper_name> is available for <prog_type_name>,\n"
+ " * e.g.\n"
+ " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
+ " * // do stuff with this helper\n"
+ " * #elif\n"
+ " * // use a workaround\n"
+ " * #endif\n"
+ " */\n"
+ "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
+ " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
+ define_prefix, define_prefix, define_prefix,
+ define_prefix);
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_helpers_for_progtype(i, supported_types[i],
+ define_prefix, full_mode, ifindex);
+
+ print_end_section();
+}
+
+static void section_misc(const char *define_prefix, __u32 ifindex)
+{
+ print_start_section("misc",
+ "Scanning miscellaneous eBPF features...",
+ "/*** eBPF misc features ***/",
+ define_prefix);
+ probe_large_insn_limit(define_prefix, ifindex);
+ print_end_section();
+}
+
static int do_probe(int argc, char **argv)
{
enum probe_component target = COMPONENT_UNSPEC;
const char *define_prefix = NULL;
bool supported_types[128] = {};
+ bool full_mode = false;
__u32 ifindex = 0;
- unsigned int i;
char *ifname;
/* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN).
@@ -629,6 +765,9 @@ static int do_probe(int argc, char **argv)
strerror(errno));
return -1;
}
+ } else if (is_prefix(*argv, "full")) {
+ full_mode = true;
+ NEXT_ARG();
} else if (is_prefix(*argv, "macros") && !define_prefix) {
define_prefix = "";
NEXT_ARG();
@@ -658,97 +797,19 @@ static int do_probe(int argc, char **argv)
jsonw_start_object(json_wtr);
}
- switch (target) {
- case COMPONENT_KERNEL:
- case COMPONENT_UNSPEC:
- if (define_prefix)
- break;
-
- print_start_section("system_config",
- "Scanning system configuration...",
- NULL, /* define_comment never used here */
- NULL); /* define_prefix always NULL here */
- if (check_procfs()) {
- probe_unprivileged_disabled();
- probe_jit_enable();
- probe_jit_harden();
- probe_jit_kallsyms();
- probe_jit_limit();
- } else {
- p_info("/* procfs not mounted, skipping related probes */");
- }
- probe_kernel_image_config();
- if (json_output)
- jsonw_end_object(json_wtr);
- else
- printf("\n");
- break;
- default:
- break;
- }
-
- print_start_section("syscall_config",
- "Scanning system call availability...",
- "/*** System call availability ***/",
- define_prefix);
-
- if (!probe_bpf_syscall(define_prefix))
+ section_system_config(target, define_prefix);
+ if (!section_syscall_config(define_prefix))
/* bpf() syscall unavailable, don't probe other BPF features */
goto exit_close_json;
-
- print_end_then_start_section("program_types",
- "Scanning eBPF program types...",
- "/*** eBPF program types ***/",
- define_prefix);
-
- for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
- probe_prog_type(i, supported_types, define_prefix, ifindex);
-
- print_end_then_start_section("map_types",
- "Scanning eBPF map types...",
- "/*** eBPF map types ***/",
- define_prefix);
-
- for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
- probe_map_type(i, define_prefix, ifindex);
-
- print_end_then_start_section("helpers",
- "Scanning eBPF helper functions...",
- "/*** eBPF helper functions ***/",
- define_prefix);
-
- if (define_prefix)
- printf("/*\n"
- " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
- " * to determine if <helper_name> is available for <prog_type_name>,\n"
- " * e.g.\n"
- " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
- " * // do stuff with this helper\n"
- " * #elif\n"
- " * // use a workaround\n"
- " * #endif\n"
- " */\n"
- "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
- " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
- define_prefix, define_prefix, define_prefix,
- define_prefix);
- for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
- probe_helpers_for_progtype(i, supported_types[i],
- define_prefix, ifindex);
-
- print_end_then_start_section("misc",
- "Scanning miscellaneous eBPF features...",
- "/*** eBPF misc features ***/",
- define_prefix);
- probe_large_insn_limit(define_prefix, ifindex);
+ section_program_types(supported_types, define_prefix, ifindex);
+ section_map_types(define_prefix, ifindex);
+ section_helpers(supported_types, define_prefix, full_mode, ifindex);
+ section_misc(define_prefix, ifindex);
exit_close_json:
- if (json_output) {
- /* End current "section" of probes */
- jsonw_end_object(json_wtr);
+ if (json_output)
/* End root object */
jsonw_end_object(json_wtr);
- }
return 0;
}
@@ -761,7 +822,7 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s probe [COMPONENT] [macros [prefix PREFIX]]\n"
+ "Usage: %s %s probe [COMPONENT] [full] [macros [prefix PREFIX]]\n"
" %s %s help\n"
"\n"
" COMPONENT := { kernel | dev NAME }\n"
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 4e75b58d3989..724ef9d941d3 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -76,6 +76,9 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
[BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
+ [BPF_PROG_TYPE_TRACING] = "tracing",
+ [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
+ [BPF_PROG_TYPE_EXT] = "ext",
};
extern const char * const map_type_name[];
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index b352ab041160..1996e67a2f00 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1573,8 +1573,8 @@ static int do_help(int argc, char **argv)
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
" cgroup/sendmsg4 | cgroup/sendmsg6 | cgroup/recvmsg4 |\n"
- " cgroup/recvmsg6 | cgroup/getsockopt |\n"
- " cgroup/setsockopt }\n"
+ " cgroup/recvmsg6 | cgroup/getsockopt | cgroup/setsockopt |\n"
+ " struct_ops | fentry | fexit | freplace }\n"
" ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
" flow_dissector }\n"
" " HELP_SPEC_OPTIONS "\n"
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index c4dd23c4b478..8ead55593984 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -239,7 +239,6 @@ buildid.*::
set buildid.dir to /dev/null. The default is $HOME/.debug
annotate.*::
- These options work only for TUI.
These options control the display of addresses, jump functions,
and source code for the lines of assembly code from a specific program.
@@ -269,6 +268,8 @@ annotate.*::
│ mov (%rdi),%rdx
│ return n;
+ This option works with the tui and stdio2 browsers.
+
annotate.use_offset::
Based on the first address of a loaded function, an offset can be used.
Instead of using the original addresses of the assembly code,
@@ -287,6 +288,8 @@ annotate.*::
368:│ mov 0x8(%r14),%rdi
+ This option works with the tui and stdio2 browsers.
+
annotate.jump_arrows::
There can be jump instructions among the assembly code.
Depending on the boolean value of jump_arrows,
@@ -306,6 +309,8 @@ annotate.*::
│1330: mov %r15,%r10
│1333: cmp %r15,%r14
+ This option works with the tui browser.
+
annotate.show_linenr::
When showing source code, if this option is 'true',
line numbers are printed as below.
@@ -325,6 +330,8 @@ annotate.*::
│ array++;
│ }
+ This option works with the tui and stdio2 browsers.
+
annotate.show_nr_jumps::
Let's see a part of assembly code.
@@ -335,6 +342,8 @@ annotate.*::
│1 1382: movb $0x1,-0x270(%rbp)
+ This option works with the tui and stdio2 browsers.
+
annotate.show_total_period::
To compare two records on an instruction base, with this option
provided, display total number of samples that belong to a line
@@ -348,11 +357,30 @@ annotate.*::
99.93 │ mov %eax,%eax
+ This option works with the tui, stdio2, and stdio browsers.
+
+ annotate.show_nr_samples::
+ By default, perf annotate shows the percentage of samples. This
+ option can be used to print the absolute number of samples instead.
+ For example, when set to false:
+
+ Percent│
+ 74.03 │ mov %fs:0x28,%rax
+
+ When set to true:
+
+ Samples│
+ 6 │ mov %fs:0x28,%rax
+
+ This option works with the tui, stdio2, and stdio browsers.
+
annotate.offset_level::
Default is '1', meaning just jump targets will have offsets shown right beside
the instruction. When set to '2', 'call' instructions will also have their
offsets shown; '3' or higher will show offsets for all instructions.
+ This option works with the tui and stdio2 browsers.
+
hist.*::
hist.percentage::
This option controls the way to calculate the overhead of filtered entries -
@@ -490,6 +518,12 @@ top.*::
column by default.
The default is 'true'.
+ top.call-graph::
+ This is identical to 'call-graph.record-mode', except it is
+ applicable only to the 'top' subcommand. This option ONLY sets up
+ the unwind method. To enable 'perf top' to actually use it,
+ the command-line option -g must be specified.
+
man.*::
man.viewer::
This option can assign a tool to view manual pages when 'help'
@@ -517,6 +551,16 @@ record.*::
But if this option is 'no-cache', it will not update the build-id cache.
'skip' skips post-processing and does not update the cache.
+ record.call-graph::
+ This is identical to 'call-graph.record-mode', except it is
+ applicable only to the 'record' subcommand. This option ONLY sets up
+ the unwind method. To enable 'perf record' to actually use it,
+ the command-line option -g must be specified.
+
+ record.aio::
+ Use 'n' control blocks in asynchronous (POSIX AIO) trace writing
+ mode ('n' default: 1, max: 4).
+
diff.*::
diff.order::
This option sets the number of columns to sort the result.
@@ -566,6 +610,11 @@ trace.*::
"libbeauty", the default, to use the same argument beautifiers used in the
strace-like sys_enter+sys_exit lines.
+ftrace.*::
+ ftrace.tracer::
+ Can be used to select the default tracer. Possible values are
+ 'function' and 'function_graph'.
+
llvm.*::
llvm.clang-path::
Path to clang. If omitted, it is searched for in $PATH.
@@ -610,6 +659,29 @@ scripts.*::
The script gets the same options passed as a full perf script,
in particular -i perfdata file, --cpu, --tid
+convert.*::
+
+ convert.queue-size::
+ Limit the size of the ordered_events queue, so that the allocation
+ size for perf data files without properly finished round events
+ can be controlled.
+
+intel-pt.*::
+
+ intel-pt.cache-divisor::
+
+ intel-pt.mispred-all::
+ If set, Intel PT decoder will set the mispred flag on all
+ branches.
+
+auxtrace.*::
+
+ auxtrace.dumpdir::
+ s390 only. The directory in which to save the auxiliary trace
+ buffer can be changed using this option. For example,
+ auxtrace.dumpdir=/tmp. If the directory does not exist or has the
+ wrong file type, the current directory is used.
+
SEE ALSO
--------
linkperf:perf[1]
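For reference, a hypothetical ~/.perfconfig fragment exercising some of the options documented above; all values are illustrative, not recommendations.

	[record]
		call-graph = dwarf
		aio = 2

	[ftrace]
		tracer = function_graph

	[auxtrace]
		dumpdir = /tmp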
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 2898cfdf8fe1..941f814820b8 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -858,21 +858,6 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
free(ptr);
}
-static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
-{
- struct cs_etm_recording *ptr =
- container_of(itr, struct cs_etm_recording, itr);
- struct evsel *evsel;
-
- evlist__for_each_entry(ptr->evlist, evsel) {
- if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
- return perf_evlist__enable_event_idx(ptr->evlist,
- evsel, idx);
- }
-
- return -EINVAL;
-}
-
struct auxtrace_record *cs_etm_record_init(int *err)
{
struct perf_pmu *cs_etm_pmu;
@@ -892,6 +877,7 @@ struct auxtrace_record *cs_etm_record_init(int *err)
}
ptr->cs_etm_pmu = cs_etm_pmu;
+ ptr->itr.pmu = cs_etm_pmu;
ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options;
ptr->itr.recording_options = cs_etm_recording_options;
ptr->itr.info_priv_size = cs_etm_info_priv_size;
@@ -901,7 +887,7 @@ struct auxtrace_record *cs_etm_record_init(int *err)
ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
ptr->itr.reference = cs_etm_reference;
ptr->itr.free = cs_etm_recording_free;
- ptr->itr.read_finish = cs_etm_read_finish;
+ ptr->itr.read_finish = auxtrace_record__read_finish;
*err = 0;
return &ptr->itr;
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index eba6541ec0f1..8d6821d9c3f6 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -158,20 +158,6 @@ static void arm_spe_recording_free(struct auxtrace_record *itr)
free(sper);
}
-static int arm_spe_read_finish(struct auxtrace_record *itr, int idx)
-{
- struct arm_spe_recording *sper =
- container_of(itr, struct arm_spe_recording, itr);
- struct evsel *evsel;
-
- evlist__for_each_entry(sper->evlist, evsel) {
- if (evsel->core.attr.type == sper->arm_spe_pmu->type)
- return perf_evlist__enable_event_idx(sper->evlist,
- evsel, idx);
- }
- return -EINVAL;
-}
-
struct auxtrace_record *arm_spe_recording_init(int *err,
struct perf_pmu *arm_spe_pmu)
{
@@ -189,12 +175,13 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
}
sper->arm_spe_pmu = arm_spe_pmu;
+ sper->itr.pmu = arm_spe_pmu;
sper->itr.recording_options = arm_spe_recording_options;
sper->itr.info_priv_size = arm_spe_info_priv_size;
sper->itr.info_fill = arm_spe_info_fill;
sper->itr.free = arm_spe_recording_free;
sper->itr.reference = arm_spe_reference;
- sper->itr.read_finish = arm_spe_read_finish;
+ sper->itr.read_finish = auxtrace_record__read_finish;
sper->itr.alignment = 0;
*err = 0;
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 43f736ed47f2..35b61bfc1b1a 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -517,3 +517,5 @@
433 common fspick sys_fspick
434 common pidfd_open sys_pidfd_open
435 nospu clone3 ppc_clone3
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index 27d9e214d068..26cee1052179 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -413,20 +413,6 @@ out_err:
return err;
}
-static int intel_bts_read_finish(struct auxtrace_record *itr, int idx)
-{
- struct intel_bts_recording *btsr =
- container_of(itr, struct intel_bts_recording, itr);
- struct evsel *evsel;
-
- evlist__for_each_entry(btsr->evlist, evsel) {
- if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
- return perf_evlist__enable_event_idx(btsr->evlist,
- evsel, idx);
- }
- return -EINVAL;
-}
-
struct auxtrace_record *intel_bts_recording_init(int *err)
{
struct perf_pmu *intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
@@ -447,6 +433,7 @@ struct auxtrace_record *intel_bts_recording_init(int *err)
}
btsr->intel_bts_pmu = intel_bts_pmu;
+ btsr->itr.pmu = intel_bts_pmu;
btsr->itr.recording_options = intel_bts_recording_options;
btsr->itr.info_priv_size = intel_bts_info_priv_size;
btsr->itr.info_fill = intel_bts_info_fill;
@@ -456,7 +443,7 @@ struct auxtrace_record *intel_bts_recording_init(int *err)
btsr->itr.find_snapshot = intel_bts_find_snapshot;
btsr->itr.parse_snapshot_options = intel_bts_parse_snapshot_options;
btsr->itr.reference = intel_bts_reference;
- btsr->itr.read_finish = intel_bts_read_finish;
+ btsr->itr.read_finish = auxtrace_record__read_finish;
btsr->itr.alignment = sizeof(struct branch);
return &btsr->itr;
}
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 20df442fdf36..7eea4fd7ce58 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -1166,20 +1166,6 @@ static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
return rdtsc();
}
-static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
-{
- struct intel_pt_recording *ptr =
- container_of(itr, struct intel_pt_recording, itr);
- struct evsel *evsel;
-
- evlist__for_each_entry(ptr->evlist, evsel) {
- if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
- return perf_evlist__enable_event_idx(ptr->evlist, evsel,
- idx);
- }
- return -EINVAL;
-}
-
struct auxtrace_record *intel_pt_recording_init(int *err)
{
struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
@@ -1200,6 +1186,7 @@ struct auxtrace_record *intel_pt_recording_init(int *err)
}
ptr->intel_pt_pmu = intel_pt_pmu;
+ ptr->itr.pmu = intel_pt_pmu;
ptr->itr.recording_options = intel_pt_recording_options;
ptr->itr.info_priv_size = intel_pt_info_priv_size;
ptr->itr.info_fill = intel_pt_info_fill;
@@ -1209,7 +1196,7 @@ struct auxtrace_record *intel_pt_recording_init(int *err)
ptr->itr.find_snapshot = intel_pt_find_snapshot;
ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
ptr->itr.reference = intel_pt_reference;
- ptr->itr.read_finish = intel_pt_read_finish;
+ ptr->itr.read_finish = auxtrace_record__read_finish;
/*
* Decoding starts at a PSB packet. Minimum PSB period is 2K so 4K
* should give at least 1 PSB per sample.
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index ff61795a4d13..6c0a0412502e 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -566,6 +566,8 @@ int cmd_annotate(int argc, const char **argv)
if (ret < 0)
return ret;
+ annotation_config__init(&annotate.opts);
+
argc = parse_options(argc, argv, options, annotate_usage, 0);
if (argc) {
/*
@@ -605,8 +607,6 @@ int cmd_annotate(int argc, const char **argv)
if (ret < 0)
goto out_delete;
- annotation_config__init();
-
symbol_conf.try_vmlinux_path = true;
ret = symbol__init(&annotate.session->header.env);
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 26bc5923e6b5..70548df2abb9 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -449,7 +449,8 @@ static int perf_del_probe_events(struct strfilter *filter)
ret = probe_file__del_strlist(kfd, klist);
if (ret < 0)
goto error;
- }
+ } else if (ret == -ENOMEM)
+ goto error;
ret2 = probe_file__get_events(ufd, filter, ulist);
if (ret2 == 0) {
@@ -459,7 +460,8 @@ static int perf_del_probe_events(struct strfilter *filter)
ret2 = probe_file__del_strlist(ufd, ulist);
if (ret2 < 0)
goto error;
- }
+ } else if (ret2 == -ENOMEM)
+ goto error;
if (ret == -ENOENT && ret2 == -ENOENT)
pr_warning("\"%s\" does not hit any event.\n", str);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 9483b3f0cae3..72a12b69f120 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -1507,7 +1507,7 @@ repeat:
symbol_conf.priv_size += sizeof(u32);
symbol_conf.sort_by_name = true;
}
- annotation_config__init();
+ annotation_config__init(&report.annotation_opts);
}
if (symbol__init(&session->header.env) < 0)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 8affcab75604..f6dd1a63f159 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -143,7 +143,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
- err = symbol__annotate(&he->ms, evsel, 0, &top->annotation_opts, NULL);
+ err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL);
if (err == 0) {
top->sym_filter_entry = he;
} else {
@@ -1683,7 +1683,7 @@ int cmd_top(int argc, const char **argv)
if (status < 0)
goto out_delete_evlist;
- annotation_config__init();
+ annotation_config__init(&top.annotation_opts);
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
status = symbol__init(NULL);
diff --git a/tools/perf/include/bpf/pid_filter.h b/tools/perf/include/bpf/pid_filter.h
index 607189a315b2..6e61c4bdf548 100644
--- a/tools/perf/include/bpf/pid_filter.h
+++ b/tools/perf/include/bpf/pid_filter.h
@@ -3,7 +3,7 @@
#ifndef _PERF_BPF_PID_FILTER_
#define _PERF_BPF_PID_FILTER_
-#include <bpf/bpf.h>
+#include <bpf.h>
#define pid_filter(name) pid_map(name, bool)
diff --git a/tools/perf/include/bpf/stdio.h b/tools/perf/include/bpf/stdio.h
index 7ca6fa5463ee..316af5b2ff35 100644
--- a/tools/perf/include/bpf/stdio.h
+++ b/tools/perf/include/bpf/stdio.h
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-#include <bpf/bpf.h>
+#include <bpf.h>
struct bpf_map SEC("maps") __bpf_stdout__ = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
diff --git a/tools/perf/include/bpf/unistd.h b/tools/perf/include/bpf/unistd.h
index d1a35b6c649d..ca7877f9a976 100644
--- a/tools/perf/include/bpf/unistd.h
+++ b/tools/perf/include/bpf/unistd.h
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: LGPL-2.1
-#include <bpf/bpf.h>
+#include <bpf.h>
static int (*bpf_get_current_pid_tgid)(void) = (void *)BPF_FUNC_get_current_pid_tgid;
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 7cb99b433888..c2cc42daf924 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -14,7 +14,7 @@ add_probe_vfs_getname() {
if [ $had_vfs_getname -eq 1 ] ; then
line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
+ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
fi
}
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index badbddbb30f8..9023267e5643 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -754,10 +754,9 @@ static int annotate_browser__run(struct annotate_browser *browser,
"? Search string backwards\n");
continue;
case 'r':
- {
- script_browse(NULL, NULL);
- continue;
- }
+ script_browse(NULL, NULL);
+ annotate_browser__show(&browser->b, title, help);
+ continue;
case 'k':
notes->options->show_linenr = !notes->options->show_linenr;
break;
@@ -834,13 +833,13 @@ show_sup_ins:
map_symbol__annotation_dump(ms, evsel, browser->opts);
continue;
case 't':
- if (notes->options->show_total_period) {
- notes->options->show_total_period = false;
- notes->options->show_nr_samples = true;
- } else if (notes->options->show_nr_samples)
- notes->options->show_nr_samples = false;
+ if (symbol_conf.show_total_period) {
+ symbol_conf.show_total_period = false;
+ symbol_conf.show_nr_samples = true;
+ } else if (symbol_conf.show_nr_samples)
+ symbol_conf.show_nr_samples = false;
else
- notes->options->show_total_period = true;
+ symbol_conf.show_total_period = true;
annotation__update_column_widths(notes);
continue;
case 'c':
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 22cc240f7371..35f9641bf670 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -174,7 +174,7 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
if (ms->map->dso->annotate_warned)
return -1;
- err = symbol__annotate(ms, evsel, 0, &annotation__default_options, NULL);
+ err = symbol__annotate(ms, evsel, &annotation__default_options, NULL);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index ca73fb74ad03..0ea95be84b3b 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1143,93 +1143,70 @@ out:
}
struct annotate_args {
- size_t privsize;
- struct arch *arch;
- struct map_symbol ms;
- struct evsel *evsel;
+ struct arch *arch;
+ struct map_symbol ms;
+ struct evsel *evsel;
struct annotation_options *options;
- s64 offset;
- char *line;
- int line_nr;
+ s64 offset;
+ char *line;
+ int line_nr;
};
-static void annotation_line__delete(struct annotation_line *al)
+static void annotation_line__init(struct annotation_line *al,
+ struct annotate_args *args,
+ int nr)
{
- void *ptr = (void *) al - al->privsize;
+ al->offset = args->offset;
+ al->line = strdup(args->line);
+ al->line_nr = args->line_nr;
+ al->data_nr = nr;
+}
+static void annotation_line__exit(struct annotation_line *al)
+{
free_srcline(al->path);
zfree(&al->line);
- free(ptr);
}
-/*
- * Allocating the annotation line data with following
- * structure:
- *
- * --------------------------------------
- * private space | struct annotation_line
- * --------------------------------------
- *
- * Size of the private space is stored in 'struct annotation_line'.
- *
- */
-static struct annotation_line *
-annotation_line__new(struct annotate_args *args, size_t privsize)
+static size_t disasm_line_size(int nr)
{
struct annotation_line *al;
- struct evsel *evsel = args->evsel;
- size_t size = privsize + sizeof(*al);
- int nr = 1;
-
- if (perf_evsel__is_group_event(evsel))
- nr = evsel->core.nr_members;
- size += sizeof(al->data[0]) * nr;
-
- al = zalloc(size);
- if (al) {
- al = (void *) al + privsize;
- al->privsize = privsize;
- al->offset = args->offset;
- al->line = strdup(args->line);
- al->line_nr = args->line_nr;
- al->data_nr = nr;
- }
-
- return al;
+ return (sizeof(struct disasm_line) + (sizeof(al->data[0]) * nr));
}
/*
* Allocating the disasm annotation line data with
* the following structure:
*
- * ------------------------------------------------------------
- * privsize space | struct disasm_line | struct annotation_line
- * ------------------------------------------------------------
+ * -------------------------------------------
+ * struct disasm_line | struct annotation_line
+ * -------------------------------------------
*
* We have the 'struct annotation_line' member as the last member
* of 'struct disasm_line' to allow easy access.
- *
*/
static struct disasm_line *disasm_line__new(struct annotate_args *args)
{
struct disasm_line *dl = NULL;
- struct annotation_line *al;
- size_t privsize = args->privsize + offsetof(struct disasm_line, al);
+ int nr = 1;
- al = annotation_line__new(args, privsize);
- if (al != NULL) {
- dl = disasm_line(al);
+ if (perf_evsel__is_group_event(args->evsel))
+ nr = args->evsel->core.nr_members;
- if (dl->al.line == NULL)
- goto out_delete;
+ dl = zalloc(disasm_line_size(nr));
+ if (!dl)
+ return NULL;
- if (args->offset != -1) {
- if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
- goto out_free_line;
+ annotation_line__init(&dl->al, args, nr);
+ if (dl->al.line == NULL)
+ goto out_delete;
- disasm_line__init_ins(dl, args->arch, &args->ms);
- }
+ if (args->offset != -1) {
+ if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
+ goto out_free_line;
+
+ disasm_line__init_ins(dl, args->arch, &args->ms);
}
return dl;
@@ -1248,7 +1225,8 @@ void disasm_line__free(struct disasm_line *dl)
else
ins__delete(&dl->ops);
zfree(&dl->ins.name);
- annotation_line__delete(&dl->al);
+ annotation_line__exit(&dl->al);
+ free(dl);
}
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
@@ -2149,13 +2127,12 @@ void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
annotation__calc_percent(notes, evsel, symbol__size(sym));
}
-int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, size_t privsize,
+int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *options, struct arch **parch)
{
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct annotate_args args = {
- .privsize = privsize,
.evsel = evsel,
.options = options,
};
@@ -2644,6 +2621,8 @@ void annotation__set_offsets(struct annotation *notes, s64 size)
struct annotation_line *al;
notes->max_line_len = 0;
+ notes->nr_entries = 0;
+ notes->nr_asm_entries = 0;
list_for_each_entry(al, &notes->src->source, node) {
size_t line_len = strlen(al->line);
@@ -2790,7 +2769,7 @@ int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel,
struct symbol *sym = ms->sym;
struct rb_root source_line = RB_ROOT;
- if (symbol__annotate(ms, evsel, 0, opts, NULL) < 0)
+ if (symbol__annotate(ms, evsel, opts, NULL) < 0)
return -1;
symbol__calc_percent(sym, evsel);
@@ -2915,9 +2894,9 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
percent = annotation_data__percent(&al->data[i], percent_type);
obj__set_percent_color(obj, percent, current_entry);
- if (notes->options->show_total_period) {
+ if (symbol_conf.show_total_period) {
obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
- } else if (notes->options->show_nr_samples) {
+ } else if (symbol_conf.show_nr_samples) {
obj__printf(obj, "%6" PRIu64 " ",
al->data[i].he.nr_samples);
} else {
@@ -2931,8 +2910,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
obj__printf(obj, "%-*s", pcnt_width, " ");
else {
obj__printf(obj, "%-*s", pcnt_width,
- notes->options->show_total_period ? "Period" :
- notes->options->show_nr_samples ? "Samples" : "Percent");
+ symbol_conf.show_total_period ? "Period" :
+ symbol_conf.show_nr_samples ? "Samples" : "Percent");
}
}
@@ -3070,7 +3049,7 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
if (perf_evsel__is_group_event(evsel))
nr_pcnt = evsel->core.nr_members;
- err = symbol__annotate(ms, evsel, 0, options, parch);
+ err = symbol__annotate(ms, evsel, options, parch);
if (err)
goto out_free_offsets;
@@ -3094,69 +3073,46 @@ out_free_offsets:
return err;
}
-#define ANNOTATION__CFG(n) \
- { .name = #n, .value = &annotation__default_options.n, }
-
-/*
- * Keep the entries sorted, they are bsearch'ed
- */
-static struct annotation_config {
- const char *name;
- void *value;
-} annotation__configs[] = {
- ANNOTATION__CFG(hide_src_code),
- ANNOTATION__CFG(jump_arrows),
- ANNOTATION__CFG(offset_level),
- ANNOTATION__CFG(show_linenr),
- ANNOTATION__CFG(show_nr_jumps),
- ANNOTATION__CFG(show_nr_samples),
- ANNOTATION__CFG(show_total_period),
- ANNOTATION__CFG(use_offset),
-};
-
-#undef ANNOTATION__CFG
-
-static int annotation_config__cmp(const void *name, const void *cfgp)
-{
- const struct annotation_config *cfg = cfgp;
-
- return strcmp(name, cfg->name);
-}
-
-static int annotation__config(const char *var, const char *value,
- void *data __maybe_unused)
+static int annotation__config(const char *var, const char *value, void *data)
{
- struct annotation_config *cfg;
- const char *name;
+ struct annotation_options *opt = data;
if (!strstarts(var, "annotate."))
return 0;
- name = var + 9;
- cfg = bsearch(name, annotation__configs, ARRAY_SIZE(annotation__configs),
- sizeof(struct annotation_config), annotation_config__cmp);
-
- if (cfg == NULL)
- pr_debug("%s variable unknown, ignoring...", var);
- else if (strcmp(var, "annotate.offset_level") == 0) {
- perf_config_int(cfg->value, name, value);
-
- if (*(int *)cfg->value > ANNOTATION__MAX_OFFSET_LEVEL)
- *(int *)cfg->value = ANNOTATION__MAX_OFFSET_LEVEL;
- else if (*(int *)cfg->value < ANNOTATION__MIN_OFFSET_LEVEL)
- *(int *)cfg->value = ANNOTATION__MIN_OFFSET_LEVEL;
+ if (!strcmp(var, "annotate.offset_level")) {
+ perf_config_u8(&opt->offset_level, "offset_level", value);
+
+ if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
+ opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
+ else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
+ opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
+ } else if (!strcmp(var, "annotate.hide_src_code")) {
+ opt->hide_src_code = perf_config_bool("hide_src_code", value);
+ } else if (!strcmp(var, "annotate.jump_arrows")) {
+ opt->jump_arrows = perf_config_bool("jump_arrows", value);
+ } else if (!strcmp(var, "annotate.show_linenr")) {
+ opt->show_linenr = perf_config_bool("show_linenr", value);
+ } else if (!strcmp(var, "annotate.show_nr_jumps")) {
+ opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
+ } else if (!strcmp(var, "annotate.show_nr_samples")) {
+ symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
+ value);
+ } else if (!strcmp(var, "annotate.show_total_period")) {
+ symbol_conf.show_total_period = perf_config_bool("show_total_period",
+ value);
+ } else if (!strcmp(var, "annotate.use_offset")) {
+ opt->use_offset = perf_config_bool("use_offset", value);
} else {
- *(bool *)cfg->value = perf_config_bool(name, value);
+ pr_debug("%s variable unknown, ignoring...", var);
}
+
return 0;
}
-void annotation_config__init(void)
+void annotation_config__init(struct annotation_options *opt)
{
- perf_config(annotation__config, NULL);
-
- annotation__default_options.show_total_period = symbol_conf.show_total_period;
- annotation__default_options.show_nr_samples = symbol_conf.show_nr_samples;
+ perf_config(annotation__config, opt);
}
static unsigned int parse_percent_type(char *str1, char *str2)
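The annotate.c rework above retires the privsize scheme: struct annotation_line is now simply the last member of struct disasm_line, and the per-event data[] slots are sized into the same allocation by disasm_line_size(nr), so one zalloc() and one free() cover the whole object. A miniature of that single-allocation layout (field names simplified; embedding a struct whose last member is a flexible array is a GNU extension the kernel relies on):

#include <stdio.h>
#include <stdlib.h>

struct annotation_line {
	int offset;
	int data_nr;
	long data[];		/* one slot per event in the group */
};

struct disasm_line {
	char ins_name[16];
	struct annotation_line al;	/* must stay last (flexible array) */
};

static struct disasm_line *disasm_line__new(int nr)
{
	struct disasm_line *dl =
		calloc(1, sizeof(*dl) + nr * sizeof(dl->al.data[0]));

	if (dl)
		dl->al.data_nr = nr;
	return dl;
}

int main(void)
{
	struct disasm_line *dl = disasm_line__new(3);

	if (!dl)
		return 1;
	printf("allocated with %d data slots\n", dl->al.data_nr);
	free(dl);		/* one free() releases the whole object */
	return 0;
}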
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 455403e8fede..001258601a37 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -83,8 +83,6 @@ struct annotation_options {
full_path,
show_linenr,
show_nr_jumps,
- show_nr_samples,
- show_total_period,
show_minmax_cycle,
show_asm_raw,
annotate_src;
@@ -141,7 +139,6 @@ struct annotation_line {
u64 cycles;
u64 cycles_max;
u64 cycles_min;
- size_t privsize;
char *path;
u32 idx;
int idx_asm;
@@ -309,7 +306,7 @@ static inline int annotation__cycles_width(struct annotation *notes)
static inline int annotation__pcnt_width(struct annotation *notes)
{
- return (notes->options->show_total_period ? 12 : 7) * notes->nr_events;
+ return (symbol_conf.show_total_period ? 12 : 7) * notes->nr_events;
}
static inline bool annotation_line__filter(struct annotation_line *al, struct annotation *notes)
@@ -352,7 +349,7 @@ struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists);
void symbol__annotate_zero_histograms(struct symbol *sym);
int symbol__annotate(struct map_symbol *ms,
- struct evsel *evsel, size_t privsize,
+ struct evsel *evsel,
struct annotation_options *options,
struct arch **parch);
int symbol__annotate2(struct map_symbol *ms,
@@ -413,7 +410,7 @@ static inline int symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
}
#endif
-void annotation_config__init(void);
+void annotation_config__init(struct annotation_options *opt);
int annotate_parse_percent_type(const struct option *opt, const char *_str,
int unset);
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index eb087e7df6f4..3571ce72ca28 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -629,8 +629,10 @@ int auxtrace_record__options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
{
- if (itr)
+ if (itr) {
+ itr->evlist = evlist;
return itr->recording_options(itr, evlist, opts);
+ }
return 0;
}
@@ -664,6 +666,24 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
return -EINVAL;
}
+int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
+{
+ struct evsel *evsel;
+
+ if (!itr->evlist || !itr->pmu)
+ return -EINVAL;
+
+ evlist__for_each_entry(itr->evlist, evsel) {
+ if (evsel->core.attr.type == itr->pmu->type) {
+ if (evsel->disabled)
+ return 0;
+ return perf_evlist__enable_event_idx(itr->evlist, evsel,
+ idx);
+ }
+ }
+ return -EINVAL;
+}
+
/*
* Event record size is 16-bit which results in a maximum size of about 64KiB.
* Allow about 4KiB for the rest of the sample record, to give a maximum
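
The new auxtrace_record__read_finish() depends on the evlist stashed in auxtrace_record__options(): after an AUX buffer is consumed, it looks up the event whose attr.type matches the tracing PMU and re-enables it, unless the user left it disabled. A rough standalone sketch of that lookup, with evsel_t and enable_event_idx() as stand-ins for the real perf structures:

#include <errno.h>
#include <stdio.h>

typedef struct { unsigned type; int disabled; } evsel_t;

static int enable_event_idx(evsel_t *e, int idx)
{
	printf("re-enabling event type %u on mmap %d\n", e->type, idx);
	return 0;
}

static int read_finish(evsel_t *evlist, int nr, unsigned pmu_type, int idx)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (evlist[i].type != pmu_type)
			continue;
		if (evlist[i].disabled)	/* user asked for it off: keep it off */
			return 0;
		return enable_event_idx(&evlist[i], idx);
	}
	return -EINVAL;	/* no event for this PMU */
}

int main(void)
{
	evsel_t evlist[] = { { .type = 1, .disabled = 0 },
			     { .type = 8, .disabled = 0 } };

	return read_finish(evlist, 2, 8, 0);
}
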
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 749d72cd9c7b..e58ef160b599 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -29,6 +29,7 @@ struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
+struct perf_pmu;
enum auxtrace_error_type {
PERF_AUXTRACE_ERROR_ITRACE = 1,
@@ -322,6 +323,8 @@ struct auxtrace_mmap_params {
* @read_finish: called after reading from an auxtrace mmap
* @alignment: alignment (if any) for AUX area data
* @default_aux_sample_size: default sample size for --aux sample option
+ * @pmu: associated pmu
+ * @evlist: selected events list
*/
struct auxtrace_record {
int (*recording_options)(struct auxtrace_record *itr,
@@ -346,6 +349,8 @@ struct auxtrace_record {
int (*read_finish)(struct auxtrace_record *itr, int idx);
unsigned int alignment;
unsigned int default_aux_sample_size;
+ struct perf_pmu *pmu;
+ struct evlist *evlist;
};
/**
@@ -537,6 +542,7 @@ int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
struct auxtrace_mmap *mm,
unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
+int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);
int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
off_t file_offset);
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 0bc9c4d7fdc5..ef38eba56ed0 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -374,6 +374,18 @@ int perf_config_int(int *dest, const char *name, const char *value)
return 0;
}
+int perf_config_u8(u8 *dest, const char *name, const char *value)
+{
+ long ret = 0;
+
+ if (!perf_parse_long(value, &ret)) {
+ bad_config(name);
+ return -1;
+ }
+ *dest = ret;
+ return 0;
+}
+
static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
{
int ret;
diff --git a/tools/perf/util/config.h b/tools/perf/util/config.h
index bd0a5897c76a..c10b66dde2f3 100644
--- a/tools/perf/util/config.h
+++ b/tools/perf/util/config.h
@@ -29,6 +29,7 @@ typedef int (*config_fn_t)(const char *, const char *, void *);
int perf_default_config(const char *, const char *, void *);
int perf_config(config_fn_t fn, void *);
int perf_config_int(int *dest, const char *, const char *);
+int perf_config_u8(u8 *dest, const char *name, const char *value);
int perf_config_u64(u64 *dest, const char *, const char *);
int perf_config_bool(const char *, const char *);
int config_error_nonbool(const char *);
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index 5003ba403345..0f5fda11675f 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -301,10 +301,15 @@ int probe_file__get_events(int fd, struct strfilter *filter,
p = strchr(ent->s, ':');
if ((p && strfilter__compare(filter, p + 1)) ||
strfilter__compare(filter, ent->s)) {
- strlist__add(plist, ent->s);
+ ret = strlist__add(plist, ent->s);
+ if (ret == -ENOMEM) {
+ pr_err("strlist__add failed with -ENOMEM\n");
+ goto out;
+ }
ret = 0;
}
}
+out:
strlist__delete(namelist);
return ret;
@@ -511,7 +516,11 @@ static int probe_cache__load(struct probe_cache *pcache)
ret = -EINVAL;
goto out;
}
- strlist__add(entry->tevlist, buf);
+ ret = strlist__add(entry->tevlist, buf);
+ if (ret == -ENOMEM) {
+ pr_err("strlist__add failed with -ENOMEM\n");
+ goto out;
+ }
}
}
out:
@@ -672,7 +681,12 @@ int probe_cache__add_entry(struct probe_cache *pcache,
command = synthesize_probe_trace_command(&tevs[i]);
if (!command)
goto out_err;
- strlist__add(entry->tevlist, command);
+ ret = strlist__add(entry->tevlist, command);
+ if (ret == -ENOMEM) {
+ pr_err("strlist__add failed with -ENOMEM\n");
+ goto out_err;
+ }
+
free(command);
}
list_add_tail(&entry->node, &pcache->entries);
@@ -853,9 +867,15 @@ int probe_cache__scan_sdt(struct probe_cache *pcache, const char *pathname)
break;
}
- strlist__add(entry->tevlist, buf);
+ ret = strlist__add(entry->tevlist, buf);
+
free(buf);
entry = NULL;
+
+ if (ret == -ENOMEM) {
+ pr_err("strlist__add failed with -ENOMEM\n");
+ break;
+ }
}
if (entry) {
list_del_init(&entry->node);
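
The probe-file.c changes all follow one pattern: strlist__add() can fail with -ENOMEM, so its result is now checked and propagated through the function's common cleanup path instead of being silently ignored. A compact sketch of that pattern; list_add()/list_free() here are hypothetical stand-ins, not the real strlist API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct list { char *items[16]; int nr; };

static int list_add(struct list *l, const char *s)
{
	char *copy = strdup(s);

	if (!copy || l->nr >= 16) {
		free(copy);
		return -ENOMEM;
	}
	l->items[l->nr++] = copy;
	return 0;
}

static void list_free(struct list *l)
{
	while (l->nr > 0)
		free(l->items[--l->nr]);
}

int main(void)
{
	struct list l = { .nr = 0 };
	const char *entries[] = { "p:probe/a _text+0", "p:probe/b _text+4" };
	int i, ret = 0;

	for (i = 0; i < 2; i++) {
		ret = list_add(&l, entries[i]);
		if (ret == -ENOMEM) {
			fprintf(stderr, "list_add failed with -ENOMEM\n");
			goto out;
		}
	}
out:
	list_free(&l);	/* cleanup runs on both success and failure */
	return ret ? 1 : 0;
}
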
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 220d04f958a6..7570e36d636d 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -30,7 +30,7 @@ my %default = (
"EMAIL_WHEN_STARTED" => 0,
"NUM_TESTS" => 1,
"TEST_TYPE" => "build",
- "BUILD_TYPE" => "randconfig",
+ "BUILD_TYPE" => "oldconfig",
"MAKE_CMD" => "make",
"CLOSE_CONSOLE_SIGNAL" => "INT",
"TIMEOUT" => 120,
@@ -1030,7 +1030,7 @@ sub __read_config {
}
if (!$skip && $rest !~ /^\s*$/) {
- die "$name: $.: Gargbage found after $type\n$_";
+ die "$name: $.: Garbage found after $type\n$_";
}
if ($skip && $type eq "TEST_START") {
@@ -1063,7 +1063,7 @@ sub __read_config {
}
if ($rest !~ /^\s*$/) {
- die "$name: $.: Gargbage found after DEFAULTS\n$_";
+ die "$name: $.: Garbage found after DEFAULTS\n$_";
}
} elsif (/^\s*INCLUDE\s+(\S+)/) {
@@ -1154,7 +1154,7 @@ sub __read_config {
# one of these sections that have SKIP defined.
# The save variable can be
# defined multiple times and the new one simply overrides
- # the prevous one.
+ # the previous one.
set_variable($lvalue, $rvalue);
} else {
@@ -1234,7 +1234,7 @@ sub read_config {
foreach my $option (keys %not_used) {
print "$option\n";
}
- print "Set IGRNORE_UNUSED = 1 to have ktest ignore unused variables\n";
+ print "Set IGNORE_UNUSED = 1 to have ktest ignore unused variables\n";
if (!read_yn "Do you want to continue?") {
exit -1;
}
@@ -1345,7 +1345,7 @@ sub eval_option {
# Check for recursive evaluations.
# 100 deep should be more than enough.
if ($r++ > 100) {
- die "Over 100 evaluations accurred with $option\n" .
+ die "Over 100 evaluations occurred with $option\n" .
"Check for recursive variables\n";
}
$prev = $option;
@@ -1383,7 +1383,7 @@ sub reboot {
} else {
# Make sure everything has been written to disk
- run_ssh("sync");
+ run_ssh("sync", 10);
if (defined($time)) {
start_monitor;
@@ -1461,7 +1461,7 @@ sub get_test_name() {
sub dodie {
- # avoid recusion
+ # avoid recursion
return if ($in_die);
$in_die = 1;
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index c3bc933d437b..27666b8007ed 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -10,7 +10,7 @@
#
# Options set in the beginning of the file are considered to be
-# default options. These options can be overriden by test specific
+# default options. These options can be overridden by test specific
# options, with the following exceptions:
#
# LOG_FILE
@@ -204,7 +204,7 @@
#
# This config file can also contain "config variables".
# These are assigned with ":=" instead of the ktest option
-# assigment "=".
+# assignment "=".
#
# The difference between ktest options and config variables
# is that config variables can be used multiple times,
@@ -263,7 +263,7 @@
#### Using options in other options ####
#
# Options that are defined in the config file may also be used
-# by other options. All options are evaulated at time of
+# by other options. All options are evaluated at time of
# use (except that config variables are evaluated at config
# processing time).
#
@@ -505,7 +505,7 @@
#TEST = ssh user@machine /root/run_test
# The build type is any make config type or special command
-# (default randconfig)
+# (default oldconfig)
# nobuild - skip the clean and build step
# useconfig:/path/to/config - use the given config and run
# oldconfig on it.
@@ -707,7 +707,7 @@
# Line to define a successful boot up in console output.
# This is what the line contains, not the entire line. If you need
-# the entire line to match, then use regural expression syntax like:
+# the entire line to match, then use regular expression syntax like:
# (do not add any quotes around it)
#
# SUCCESS_LINE = ^MyBox Login:$
@@ -839,7 +839,7 @@
# (ignored if POWEROFF_ON_SUCCESS is set)
#REBOOT_ON_SUCCESS = 1
-# In case there are isses with rebooting, you can specify this
+# In case there are issues with rebooting, you can specify this
# to always powercycle after this amount of time after calling
# reboot.
# Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
@@ -848,7 +848,7 @@
# (default undefined)
#POWERCYCLE_AFTER_REBOOT = 5
-# In case there's isses with halting, you can specify this
+# In case there's issues with halting, you can specify this
# to always poweroff after this amount of time after calling
# halt.
# Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just
@@ -972,7 +972,7 @@
#
# PATCHCHECK_START is required and is the first patch to
# test (the SHA1 of the commit). You may also specify anything
-# that git checkout allows (branch name, tage, HEAD~3).
+# that git checkout allows (branch name, tag, HEAD~3).
#
# PATCHCHECK_END is the last patch to check (default HEAD)
#
@@ -994,7 +994,7 @@
# IGNORE_WARNINGS is set for the given commit's sha1
#
# IGNORE_WARNINGS can be used to disable the failure of patchcheck
-# on a particuler commit (SHA1). You can add more than one commit
+# on a particular commit (SHA1). You can add more than one commit
# by adding a list of SHA1s that are space delimited.
#
# If BUILD_NOCLEAN is set, then make mrproper will not be run on
@@ -1093,7 +1093,7 @@
# whatever reason. (Can't reboot, want to inspect each iteration)
# Doing a BISECT_MANUAL will have the test wait for you to
# tell it if the test passed or failed after each iteration.
-# This is basicall the same as running git bisect yourself
+# This is basically the same as running git bisect yourself
# but ktest will rebuild and install the kernel for you.
#
# BISECT_CHECK = 1 (optional, default 0)
@@ -1239,7 +1239,7 @@
#
# CONFIG_BISECT_EXEC (optional)
# The config bisect is a separate program that comes with ktest.pl.
-# By befault, it will look for:
+# By default, it will look for:
# `pwd`/config-bisect.pl # the location ktest.pl was executed from.
# If it does not find it there, it will look for:
# `dirname <ktest.pl>`/config-bisect.pl # The directory that holds ktest.pl
diff --git a/tools/testing/selftests/.gitignore b/tools/testing/selftests/.gitignore
index 61df01cdf0b2..304fdf1a21dc 100644
--- a/tools/testing/selftests/.gitignore
+++ b/tools/testing/selftests/.gitignore
@@ -3,4 +3,7 @@ gpiogpio-hammer
gpioinclude/
gpiolsgpio
tpm2/SpaceTest.log
-tpm2/*.pyc
+
+# Python bytecode and cache
+__pycache__/
+*.py[cod]
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 2a583196fa51..2d7f5df33f04 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -20,7 +20,7 @@ CLANG ?= clang
LLC ?= llc
LLVM_OBJCOPY ?= llvm-objcopy
BPF_GCC ?= $(shell command -v bpf-gcc;)
-CFLAGS += -g -Wall -O2 $(GENFLAGS) -I$(CURDIR) -I$(APIDIR) \
+CFLAGS += -g -rdynamic -Wall -O2 $(GENFLAGS) -I$(CURDIR) -I$(APIDIR) \
-I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) -I$(TOOLSINCDIR) \
-Dbpf_prog_load=bpf_prog_test_load \
-Dbpf_load_program=bpf_test_load_program
@@ -62,7 +62,8 @@ TEST_PROGS := test_kmod.sh \
test_tc_tunnel.sh \
test_tc_edt.sh \
test_xdping.sh \
- test_bpftool_build.sh
+ test_bpftool_build.sh \
+ test_bpftool.sh
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh \
diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
index 68d452bb9fd9..a1dd13b34d4b 100644
--- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
@@ -509,11 +509,6 @@ static void test_syncookie(int type, sa_family_t family)
.pass_on_failure = 0,
};
- if (type != SOCK_STREAM) {
- test__skip();
- return;
- }
-
/*
* +1 for TCP-SYN and
* +1 for the TCP-ACK (ack the syncookie)
@@ -787,7 +782,7 @@ static const char *sotype_str(int sotype)
}
}
-#define TEST_INIT(fn, ...) { fn, #fn, __VA_ARGS__ }
+#define TEST_INIT(fn_, ...) { .fn = fn_, .name = #fn_, __VA_ARGS__ }
static void test_config(int sotype, sa_family_t family, bool inany)
{
@@ -795,19 +790,31 @@ static void test_config(int sotype, sa_family_t family, bool inany)
void (*fn)(int sotype, sa_family_t family);
const char *name;
bool no_inner_map;
+ int need_sotype;
} tests[] = {
- TEST_INIT(test_err_inner_map, true /* no_inner_map */),
+ TEST_INIT(test_err_inner_map,
+ .no_inner_map = true),
TEST_INIT(test_err_skb_data),
TEST_INIT(test_err_sk_select_port),
TEST_INIT(test_pass),
- TEST_INIT(test_syncookie),
+ TEST_INIT(test_syncookie,
+ .need_sotype = SOCK_STREAM),
TEST_INIT(test_pass_on_err),
TEST_INIT(test_detach_bpf),
};
char s[MAX_TEST_NAME];
const struct test *t;
+ /* SOCKMAP/SOCKHASH don't support UDP yet */
+ if (sotype == SOCK_DGRAM &&
+ (inner_map_type == BPF_MAP_TYPE_SOCKMAP ||
+ inner_map_type == BPF_MAP_TYPE_SOCKHASH))
+ return;
+
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ if (t->need_sotype && t->need_sotype != sotype)
+ continue; /* test not compatible with socket type */
+
snprintf(s, sizeof(s), "%s %s/%s %s %s",
maptype_str(inner_map_type),
family_str(family), sotype_str(sotype),
@@ -816,13 +823,6 @@ static void test_config(int sotype, sa_family_t family, bool inany)
if (!test__start_subtest(s))
continue;
- if (sotype == SOCK_DGRAM &&
- inner_map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
- /* SOCKMAP/SOCKHASH don't support UDP yet */
- test__skip();
- continue;
- }
-
setup_per_test(sotype, family, inany, t->no_inner_map);
t->fn(sotype, family);
cleanup_per_test(t->no_inner_map);
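
The TEST_INIT() rework above is what makes the .need_sotype annotation possible: with designated initializers, a call site can set any subset of the optional trailing fields by name and leave the rest zero-initialized, instead of padding positional arguments. A standalone illustration of the macro shape; SOCK_STREAM_ID and the empty test bodies are placeholders:

#include <stdio.h>

#define SOCK_STREAM_ID 1

struct test {
	void (*fn)(void);
	const char *name;
	int no_inner_map;
	int need_sotype;
};

#define TEST_INIT(fn_, ...) { .fn = fn_, .name = #fn_, __VA_ARGS__ }

static void test_pass(void) { }
static void test_syncookie(void) { }

static const struct test tests[] = {
	TEST_INIT(test_pass),
	TEST_INIT(test_syncookie, .need_sotype = SOCK_STREAM_ID),
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%s need_sotype=%d\n", tests[i].name,
		       tests[i].need_sotype);	/* omitted fields read as 0 */
	return 0;
}
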
diff --git a/tools/testing/selftests/bpf/test_bpftool.py b/tools/testing/selftests/bpf/test_bpftool.py
new file mode 100644
index 000000000000..4fed2dc25c0a
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_bpftool.py
@@ -0,0 +1,178 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2020 SUSE LLC.
+
+import collections
+import functools
+import json
+import os
+import socket
+import subprocess
+import unittest
+
+
+# Add the source tree of bpftool and /usr/local/sbin to PATH
+cur_dir = os.path.dirname(os.path.realpath(__file__))
+bpftool_dir = os.path.abspath(os.path.join(cur_dir, "..", "..", "..", "..",
+ "tools", "bpf", "bpftool"))
+os.environ["PATH"] = bpftool_dir + ":/usr/local/sbin:" + os.environ["PATH"]
+
+
+class IfaceNotFoundError(Exception):
+ pass
+
+
+class UnprivilegedUserError(Exception):
+ pass
+
+
+def _bpftool(args, json=True):
+ _args = ["bpftool"]
+ if json:
+ _args.append("-j")
+ _args.extend(args)
+
+ return subprocess.check_output(_args)
+
+
+def bpftool(args):
+ return _bpftool(args, json=False).decode("utf-8")
+
+
+def bpftool_json(args):
+ res = _bpftool(args)
+ return json.loads(res)
+
+
+def get_default_iface():
+ for iface in socket.if_nameindex():
+ if iface[1] != "lo":
+ return iface[1]
+ raise IfaceNotFoundError("Could not find any network interface to probe")
+
+
+def default_iface(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ iface = get_default_iface()
+ return f(*args, iface, **kwargs)
+ return wrapper
+
+
+class TestBpftool(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ if os.getuid() != 0:
+ raise UnprivilegedUserError(
+ "This test suite needs root privileges")
+
+ @default_iface
+ def test_feature_dev_json(self, iface):
+ unexpected_helpers = [
+ "bpf_probe_write_user",
+ "bpf_trace_printk",
+ ]
+ expected_keys = [
+ "syscall_config",
+ "program_types",
+ "map_types",
+ "helpers",
+ "misc",
+ ]
+
+ res = bpftool_json(["feature", "probe", "dev", iface])
+ # Check if the result has all expected keys.
+ self.assertCountEqual(res.keys(), expected_keys)
+ # Check if unexpected helpers are not included in helpers probes
+ # result.
+ for helpers in res["helpers"].values():
+ for unexpected_helper in unexpected_helpers:
+ self.assertNotIn(unexpected_helper, helpers)
+
+ def test_feature_kernel(self):
+ test_cases = [
+ bpftool_json(["feature", "probe", "kernel"]),
+ bpftool_json(["feature", "probe"]),
+ bpftool_json(["feature"]),
+ ]
+ unexpected_helpers = [
+ "bpf_probe_write_user",
+ "bpf_trace_printk",
+ ]
+ expected_keys = [
+ "syscall_config",
+ "system_config",
+ "program_types",
+ "map_types",
+ "helpers",
+ "misc",
+ ]
+
+ for tc in test_cases:
+ # Check if the result has all expected keys.
+ self.assertCountEqual(tc.keys(), expected_keys)
+ # Check if unexpected helpers are not included in helpers probes
+ # result.
+ for helpers in tc["helpers"].values():
+ for unexpected_helper in unexpected_helpers:
+ self.assertNotIn(unexpected_helper, helpers)
+
+ def test_feature_kernel_full(self):
+ test_cases = [
+ bpftool_json(["feature", "probe", "kernel", "full"]),
+ bpftool_json(["feature", "probe", "full"]),
+ ]
+ expected_helpers = [
+ "bpf_probe_write_user",
+ "bpf_trace_printk",
+ ]
+
+ for tc in test_cases:
+ # Check if expected helpers are included at least once in any
+ # helpers list for any program type. Unfortunately we cannot assume
+ # that they will be included in all program types or a specific
+ # subset of programs. It depends on the kernel version and
+ # configuration.
+ found_helpers = False
+
+ for helpers in tc["helpers"].values():
+ if all(expected_helper in helpers
+ for expected_helper in expected_helpers):
+ found_helpers = True
+ break
+
+ self.assertTrue(found_helpers)
+
+ def test_feature_kernel_full_vs_not_full(self):
+ full_res = bpftool_json(["feature", "probe", "full"])
+ not_full_res = bpftool_json(["feature", "probe"])
+ not_full_set = set()
+ full_set = set()
+
+ for helpers in full_res["helpers"].values():
+ for helper in helpers:
+ full_set.add(helper)
+
+ for helpers in not_full_res["helpers"].values():
+ for helper in helpers:
+ not_full_set.add(helper)
+
+ self.assertCountEqual(full_set - not_full_set,
+ {"bpf_probe_write_user", "bpf_trace_printk"})
+ self.assertCountEqual(not_full_set - full_set, set())
+
+ def test_feature_macros(self):
+ expected_patterns = [
+ r"/\*\*\* System call availability \*\*\*/",
+ r"#define HAVE_BPF_SYSCALL",
+ r"/\*\*\* eBPF program types \*\*\*/",
+ r"#define HAVE.*PROG_TYPE",
+ r"/\*\*\* eBPF map types \*\*\*/",
+ r"#define HAVE.*MAP_TYPE",
+ r"/\*\*\* eBPF helper functions \*\*\*/",
+ r"#define HAVE.*HELPER",
+ r"/\*\*\* eBPF misc features \*\*\*/",
+ ]
+
+ res = bpftool(["feature", "probe", "macros"])
+ for pattern in expected_patterns:
+ self.assertRegex(res, pattern)
diff --git a/tools/testing/selftests/bpf/test_bpftool.sh b/tools/testing/selftests/bpf/test_bpftool.sh
new file mode 100755
index 000000000000..66690778e36d
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_bpftool.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2020 SUSE LLC.
+
+python3 -m unittest -v test_bpftool.TestBpftool
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index bab1e6f1d8f1..a969c77e9456 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -6,6 +6,8 @@
#include "bpf_rlimit.h"
#include <argp.h>
#include <string.h>
+#include <signal.h>
+#include <execinfo.h> /* backtrace */
/* defined in test_progs.h */
struct test_env env = {};
@@ -617,6 +619,23 @@ int cd_flavor_subdir(const char *exec_name)
return chdir(flavor);
}
+#define MAX_BACKTRACE_SZ 128
+void crash_handler(int signum)
+{
+ void *bt[MAX_BACKTRACE_SZ];
+ size_t sz;
+
+ sz = backtrace(bt, ARRAY_SIZE(bt));
+
+ if (env.test)
+ dump_test_log(env.test, true);
+ if (env.stdout)
+ stdio_restore();
+
+ fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
+ backtrace_symbols_fd(bt, sz, STDERR_FILENO);
+}
+
int main(int argc, char **argv)
{
static const struct argp argp = {
@@ -624,8 +643,14 @@ int main(int argc, char **argv)
.parser = parse_arg,
.doc = argp_program_doc,
};
+ struct sigaction sigact = {
+ .sa_handler = crash_handler,
+ .sa_flags = SA_RESETHAND,
+ };
int err, i;
+ sigaction(SIGSEGV, &sigact, NULL);
+
err = argp_parse(&argp, argc, argv, 0, NULL, &env);
if (err)
return err;
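
The crash handler added to test_progs.c uses the glibc backtrace()/backtrace_symbols_fd() API, and SA_RESETHAND restores the default disposition after the first delivery so the re-executed fault still terminates the process; the -rdynamic flag added to the bpf Makefile earlier in this patch is what makes symbol names visible in the trace. A standalone version of the same handler, runnable on its own (the deliberate NULL dereference in main() exists only to trigger it):

#include <execinfo.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_BACKTRACE_SZ 128

static void crash_handler(int signum)
{
	void *bt[MAX_BACKTRACE_SZ];
	int sz = backtrace(bt, MAX_BACKTRACE_SZ);

	/* fprintf() is not strictly async-signal-safe, but is an
	 * acceptable trade-off in a test runner that is crashing anyway */
	fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
	backtrace_symbols_fd(bt, sz, STDERR_FILENO);
}

int main(void)
{
	struct sigaction sigact = {
		.sa_handler = crash_handler,
		.sa_flags = SA_RESETHAND,	/* one shot, then default */
	};

	sigaction(SIGSEGV, &sigact, NULL);

	/* deliberate NULL dereference to demonstrate the handler */
	return *(volatile int *)0;
}
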
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh
index eff6393ce974..71066bc4b886 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh
@@ -114,23 +114,12 @@ ping_ipv4()
ping_test $h1 192.0.2.2
}
-wait_for_packets()
-{
- local t0=$1; shift
- local prio_observe=$1; shift
-
- local t1=$(ethtool_stats_get $swp1 rx_frames_prio_$prio_observe)
- local delta=$((t1 - t0))
- echo $delta
- ((delta >= 10))
-}
-
__test_defprio()
{
local prio_install=$1; shift
local prio_observe=$1; shift
- local delta
local key
+ local t1
local i
RET=0
@@ -139,9 +128,10 @@ __test_defprio()
local t0=$(ethtool_stats_get $swp1 rx_frames_prio_$prio_observe)
mausezahn -q $h1 -d 100m -c 10 -t arp reply
- delta=$(busywait "$HIT_TIMEOUT" wait_for_packets $t0 $prio_observe)
+ t1=$(busywait "$HIT_TIMEOUT" until_counter_is ">= $((t0 + 10))" \
+ ethtool_stats_get $swp1 rx_frames_prio_$prio_observe)
- check_err $? "Default priority $prio_install/$prio_observe: Expected to capture 10 packets, got $delta."
+ check_err $? "Default priority $prio_install/$prio_observe: Expected to capture 10 packets, got $((t1 - t0))."
log_test "Default priority $prio_install/$prio_observe"
defprio_uninstall $swp1 $prio_install
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
index c9fc4d4885c1..94c37124a840 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
@@ -56,11 +56,19 @@ switch_destroy()
}
# Callback from sch_ets_tests.sh
-get_stats()
+collect_stats()
{
- local band=$1; shift
+ local -a streams=("$@")
+ local stream
- ethtool_stats_get "$h2" rx_octets_prio_$band
+ # Wait for qdisc counter update so that we don't get it mid-way through.
+ busywait_for_counter 1000 +1 \
+ qdisc_parent_stats_get $swp2 10:$((${streams[0]} + 1)) .bytes \
+ > /dev/null
+
+ for stream in ${streams[@]}; do
+ qdisc_parent_stats_get $swp2 10:$((stream + 1)) .bytes
+ done
}
bail_on_lldpad
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
index ebf7752f6d93..8f833678ac4d 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
@@ -351,7 +351,7 @@ build_backlog()
local i=0
while :; do
- local cur=$(busywait 1100 until_counter_is $((cur + 1)) \
+ local cur=$(busywait 1100 until_counter_is "> $cur" \
get_qdisc_backlog $vlan)
local diff=$((size - cur))
local pkts=$(((diff + 7999) / 8000))
@@ -481,14 +481,14 @@ do_mc_backlog_test()
start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) bc
start_tcp_traffic $h2.$vlan $(ipaddr 2 $vlan) $(ipaddr 3 $vlan) bc
- qbl=$(busywait 5000 until_counter_is 500000 \
+ qbl=$(busywait 5000 until_counter_is ">= 500000" \
get_qdisc_backlog $vlan)
check_err $? "Could not build MC backlog"
# Verify that we actually see the backlog on BUM TC. Do a busywait as
# well, performance blips might cause false fail.
local ebl
- ebl=$(busywait 5000 until_counter_is 500000 \
+ ebl=$(busywait 5000 until_counter_is ">= 500000" \
get_mc_transmit_queue $vlan)
check_err $? "MC backlog reported by qdisc not visible in ethtool"
diff --git a/tools/testing/selftests/lkdtm/.gitignore b/tools/testing/selftests/lkdtm/.gitignore
new file mode 100644
index 000000000000..f26212605b6b
--- /dev/null
+++ b/tools/testing/selftests/lkdtm/.gitignore
@@ -0,0 +1,2 @@
+*.sh
+!run.sh
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index ecc52d4c034d..91f9aea853b1 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -23,3 +23,4 @@ so_txtime
tcp_fastopen_backup_key
nettest
fin_ack_lat
+reuseaddr_ports_exhausted
\ No newline at end of file
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 287ae916ec0b..48063fd69924 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -12,6 +12,7 @@ TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_a
TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
TEST_PROGS += fin_ack_lat.sh
+TEST_PROGS += reuseaddr_ports_exhausted.sh
TEST_PROGS_EXTENDED := in_netns.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
@@ -22,6 +23,7 @@ TEST_GEN_FILES += tcp_fastopen_backup_key
TEST_GEN_FILES += fin_ack_lat
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
+TEST_GEN_FILES += reuseaddr_ports_exhausted
KSFT_KHDR_INSTALL := 1
include ../lib.mk
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 60273f1bc7d9..b7616704b55e 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -1041,6 +1041,26 @@ ipv6_addr_metric_test()
fi
log_test $rc 0 "Prefix route with metric on link up"
+ # verify peer metric added correctly
+ set -e
+ run_cmd "$IP -6 addr flush dev dummy2"
+ run_cmd "$IP -6 addr add dev dummy2 2001:db8:104::1 peer 2001:db8:104::2 metric 260"
+ set +e
+
+ check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
+ log_test $? 0 "Set metric with peer route on local side"
+ log_test $? 0 "User specified metric on local address"
+ check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
+ log_test $? 0 "Set metric with peer route on peer side"
+
+ set -e
+ run_cmd "$IP -6 addr change dev dummy2 2001:db8:104::1 peer 2001:db8:104::3 metric 261"
+ set +e
+
+ check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 261"
+ log_test $? 0 "Modify metric and peer address on local side"
+ check_route6 "2001:db8:104::3 dev dummy2 proto kernel metric 261"
+ log_test $? 0 "Modify metric and peer address on peer side"
+
$IP li del dummy1
$IP li del dummy2
cleanup
@@ -1457,13 +1478,20 @@ ipv4_addr_metric_test()
run_cmd "$IP addr flush dev dummy2"
run_cmd "$IP addr add dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 260"
- run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 261"
rc=$?
if [ $rc -eq 0 ]; then
- check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
+ check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 260"
+ rc=$?
+ fi
+ log_test $rc 0 "Set metric of address with peer route"
+
+ run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.3 metric 261"
+ rc=$?
+ if [ $rc -eq 0 ]; then
+ check_route "172.16.104.3 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
rc=$?
fi
- log_test $rc 0 "Modify metric of address with peer route"
+ log_test $rc 0 "Modify metric and peer address for peer route"
$IP li del dummy1
$IP li del dummy2
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 83fd15e3e545..a4a7879b3bb9 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -277,11 +277,11 @@ wait_for_offload()
until_counter_is()
{
- local value=$1; shift
+ local expr=$1; shift
local current=$("$@")
echo $((current))
- ((current >= value))
+ ((current $expr))
}
busywait_for_counter()
@@ -290,7 +290,7 @@ busywait_for_counter()
local delta=$1; shift
local base=$("$@")
- busywait "$timeout" until_counter_is $((base + delta)) "$@"
+ busywait "$timeout" until_counter_is ">= $((base + delta))" "$@"
}
setup_wait_dev()
@@ -626,6 +626,17 @@ tc_rule_stats_get()
| jq ".[1].options.actions[].stats$selector"
}
+tc_rule_handle_stats_get()
+{
+ local id=$1; shift
+ local handle=$1; shift
+ local selector=${1:-.packets}; shift
+
+ tc -j -s filter show $id \
+ | jq ".[] | select(.options.handle == $handle) | \
+ .options.actions[0].stats$selector"
+}
+
ethtool_stats_get()
{
local dev=$1; shift
@@ -644,6 +655,16 @@ qdisc_stats_get()
| jq '.[] | select(.handle == "'"$handle"'") | '"$selector"
}
+qdisc_parent_stats_get()
+{
+ local dev=$1; shift
+ local parent=$1; shift
+ local selector=$1; shift
+
+ tc -j -s qdisc show dev "$dev" invisible \
+ | jq '.[] | select(.parent == "'"$parent"'") | '"$selector"
+}
+
humanize()
{
local speed=$1; shift
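
until_counter_is() now takes a full comparison expression ("> $cur", ">= 500000", "== $count") rather than a hard-coded >= threshold, which lets busywait_for_counter() and the tc/qdisc helpers below share a single polling loop. A C analogue of the generalized poll-until-predicate idea, assuming a fake read_counter() in place of the ethtool/qdisc stats reads:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static long read_counter(void *ctx)
{
	return ++*(long *)ctx;	/* fake monotonically increasing counter */
}

/* poll get() until pred(current, threshold) holds or timeout expires */
static bool busywait(long long timeout_ms, bool (*pred)(long, long),
		     long threshold, long (*get)(void *), void *ctx, long *out)
{
	long long deadline = now_ms() + timeout_ms;

	do {
		*out = get(ctx);
		if (pred(*out, threshold))
			return true;
	} while (now_ms() < deadline);
	return false;
}

static bool ge(long cur, long want) { return cur >= want; }

int main(void)
{
	long ctr = 0, seen;

	if (busywait(1000, ge, 10, read_counter, &ctr, &seen))
		printf("counter reached %ld\n", seen);
	return 0;
}
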
diff --git a/tools/testing/selftests/net/forwarding/sch_ets.sh b/tools/testing/selftests/net/forwarding/sch_ets.sh
index 40e0ad1bc4f2..e60c8b4818cc 100755
--- a/tools/testing/selftests/net/forwarding/sch_ets.sh
+++ b/tools/testing/selftests/net/forwarding/sch_ets.sh
@@ -34,11 +34,14 @@ switch_destroy()
}
# Callback from sch_ets_tests.sh
-get_stats()
+collect_stats()
{
- local stream=$1; shift
+ local -a streams=("$@")
+ local stream
- link_stats_get $h2.1$stream rx bytes
+ for stream in ${streams[@]}; do
+ qdisc_parent_stats_get $swp2 10:$((stream + 1)) .bytes
+ done
}
ets_run
diff --git a/tools/testing/selftests/net/forwarding/sch_ets_tests.sh b/tools/testing/selftests/net/forwarding/sch_ets_tests.sh
index 3c3b204d47e8..cdf689e99458 100644
--- a/tools/testing/selftests/net/forwarding/sch_ets_tests.sh
+++ b/tools/testing/selftests/net/forwarding/sch_ets_tests.sh
@@ -2,7 +2,7 @@
# Global interface:
# $put -- port under test (e.g. $swp2)
-# get_stats($band) -- A function to collect stats for band
+# collect_stats($streams...) -- A function to get stats for individual streams
# ets_start_traffic($band) -- Start traffic for this band
# ets_change_qdisc($op, $dev, $nstrict, $quanta...) -- Add or change qdisc
@@ -94,15 +94,11 @@ __ets_dwrr_test()
sleep 10
- t0=($(for stream in ${streams[@]}; do
- get_stats $stream
- done))
+ t0=($(collect_stats "${streams[@]}"))
sleep 10
- t1=($(for stream in ${streams[@]}; do
- get_stats $stream
- done))
+ t1=($(collect_stats "${streams[@]}"))
d=($(for ((i = 0; i < ${#streams[@]}; i++)); do
echo $((${t1[$i]} - ${t0[$i]}))
done))
diff --git a/tools/testing/selftests/net/forwarding/tc_common.sh b/tools/testing/selftests/net/forwarding/tc_common.sh
index 64f652633585..0e18e8be6e2a 100644
--- a/tools/testing/selftests/net/forwarding/tc_common.sh
+++ b/tools/testing/selftests/net/forwarding/tc_common.sh
@@ -6,39 +6,14 @@ CHECK_TC="yes"
# Can be overridden by the configuration file. See lib.sh
TC_HIT_TIMEOUT=${TC_HIT_TIMEOUT:=1000} # ms
-__tc_check_packets()
-{
- local id=$1
- local handle=$2
- local count=$3
- local operator=$4
-
- start_time="$(date -u +%s%3N)"
- while true
- do
- cmd_jq "tc -j -s filter show $id" \
- ".[] | select(.options.handle == $handle) | \
- select(.options.actions[0].stats.packets $operator $count)" \
- &> /dev/null
- ret=$?
- if [[ $ret -eq 0 ]]; then
- return $ret
- fi
- current_time="$(date -u +%s%3N)"
- diff=$(expr $current_time - $start_time)
- if [ "$diff" -gt "$TC_HIT_TIMEOUT" ]; then
- return 1
- fi
- done
-}
-
tc_check_packets()
{
local id=$1
local handle=$2
local count=$3
- __tc_check_packets "$id" "$handle" "$count" "=="
+ busywait "$TC_HIT_TIMEOUT" until_counter_is "== $count" \
+ tc_rule_handle_stats_get "$id" "$handle" > /dev/null
}
tc_check_packets_hitting()
@@ -46,5 +21,6 @@ tc_check_packets_hitting()
local id=$1
local handle=$2
- __tc_check_packets "$id" "$handle" 0 ">"
+ busywait "$TC_HIT_TIMEOUT" until_counter_is "> 0" \
+ tc_rule_handle_stats_get "$id" "$handle" > /dev/null
}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index 99579c0223c1..702bab2c12da 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -34,8 +34,8 @@ extern int optind;
#define TCP_ULP 31
#endif
+static int poll_timeout = 10 * 1000;
static bool listen_mode;
-static int poll_timeout;
enum cfg_mode {
CFG_MODE_POLL,
@@ -50,11 +50,20 @@ static int cfg_sock_proto = IPPROTO_MPTCP;
static bool tcpulp_audit;
static int pf = AF_INET;
static int cfg_sndbuf;
+static int cfg_rcvbuf;
static void die_usage(void)
{
- fprintf(stderr, "Usage: mptcp_connect [-6] [-u] [-s MPTCP|TCP] [-p port] -m mode]"
- "[ -l ] [ -t timeout ] connect_address\n");
+ fprintf(stderr, "Usage: mptcp_connect [-6] [-u] [-s MPTCP|TCP] [-p port] [-m mode]"
+ "[-l] connect_address\n");
+ fprintf(stderr, "\t-6 use ipv6\n");
+ fprintf(stderr, "\t-t num -- set poll timeout to num\n");
+ fprintf(stderr, "\t-S num -- set SO_SNDBUF to num\n");
+ fprintf(stderr, "\t-R num -- set SO_RCVBUF to num\n");
+ fprintf(stderr, "\t-p num -- use port num\n");
+ fprintf(stderr, "\t-m [MPTCP|TCP] -- use tcp or mptcp sockets\n");
+ fprintf(stderr, "\t-s [mmap|poll] -- use poll (default) or mmap\n");
+ fprintf(stderr, "\t-u -- check mptcp ulp\n");
exit(1);
}
@@ -97,6 +106,17 @@ static void xgetaddrinfo(const char *node, const char *service,
}
}
+static void set_rcvbuf(int fd, unsigned int size)
+{
+ int err;
+
+ err = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &size, sizeof(size));
+ if (err) {
+ perror("set SO_RCVBUF");
+ exit(1);
+ }
+}
+
static void set_sndbuf(int fd, unsigned int size)
{
int err;
@@ -704,6 +724,8 @@ int main_loop(void)
check_getpeername_connect(fd);
+ if (cfg_rcvbuf)
+ set_rcvbuf(fd, cfg_rcvbuf);
if (cfg_sndbuf)
set_sndbuf(fd, cfg_sndbuf);
@@ -745,7 +767,7 @@ int parse_mode(const char *mode)
return 0;
}
-int parse_sndbuf(const char *size)
+static int parse_int(const char *size)
{
unsigned long s;
@@ -765,16 +787,14 @@ int parse_sndbuf(const char *size)
die_usage();
}
- cfg_sndbuf = s;
-
- return 0;
+ return (int)s;
}
static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "6lp:s:hut:m:b:")) != -1) {
+ while ((c = getopt(argc, argv, "6lp:s:hut:m:S:R:")) != -1) {
switch (c) {
case 'l':
listen_mode = true;
@@ -802,8 +822,11 @@ static void parse_opts(int argc, char **argv)
case 'm':
cfg_mode = parse_mode(optarg);
break;
- case 'b':
- cfg_sndbuf = parse_sndbuf(optarg);
+ case 'S':
+ cfg_sndbuf = parse_int(optarg);
+ break;
+ case 'R':
+ cfg_rcvbuf = parse_int(optarg);
break;
}
}
@@ -831,6 +854,8 @@ int main(int argc, char *argv[])
if (fd < 0)
return 1;
+ if (cfg_rcvbuf)
+ set_rcvbuf(fd, cfg_rcvbuf);
if (cfg_sndbuf)
set_sndbuf(fd, cfg_sndbuf);
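
The new -S/-R switches boil down to setsockopt(SO_SNDBUF)/setsockopt(SO_RCVBUF) calls made before the transfer starts. A small standalone sketch; note that Linux doubles the requested value internally for bookkeeping, which a getsockopt() read-back makes visible:

#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>

static void set_buf(int fd, int optname, unsigned int size)
{
	if (setsockopt(fd, SOL_SOCKET, optname, &size, sizeof(size))) {
		perror("setsockopt");
		exit(1);
	}
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	unsigned int val;
	socklen_t len = sizeof(val);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	set_buf(fd, SO_SNDBUF, 300000);
	set_buf(fd, SO_RCVBUF, 150000);

	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
	printf("SO_RCVBUF in effect: %u\n", val);	/* ~2x the request */
	return 0;
}
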
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index d573a0feb98d..acf02e156d20 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -3,7 +3,7 @@
time_start=$(date +%s)
-optstring="b:d:e:l:r:h4cm:"
+optstring="S:R:d:e:l:r:h4cm:"
ret=0
sin=""
sout=""
@@ -19,6 +19,7 @@ tc_loss=$((RANDOM%101))
tc_reorder=""
testmode=""
sndbuf=0
+rcvbuf=0
options_log=true
if [ $tc_loss -eq 100 ];then
@@ -39,7 +40,8 @@ usage() {
echo -e "\t-e: ethtool features to disable, e.g.: \"-e tso -e gso\" (default: randomly disable any of tso/gso/gro)"
echo -e "\t-4: IPv4 only: disable IPv6 tests (default: test both IPv4 and IPv6)"
echo -e "\t-c: capture packets for each test using tcpdump (default: no capture)"
- echo -e "\t-b: set sndbuf value (default: use kernel default)"
+ echo -e "\t-S: set sndbuf value (default: use kernel default)"
+ echo -e "\t-R: set rcvbuf value (default: use kernel default)"
echo -e "\t-m: test mode (poll, sendfile; default: poll)"
}
@@ -73,11 +75,19 @@ while getopts "$optstring" option;do
"c")
capture=true
;;
- "b")
+ "S")
if [ $OPTARG -ge 0 ];then
sndbuf="$OPTARG"
else
- echo "-s requires numeric argument, got \"$OPTARG\"" 1>&2
+ echo "-S requires numeric argument, got \"$OPTARG\"" 1>&2
+ exit 1
+ fi
+ ;;
+ "R")
+ if [ $OPTARG -ge 0 ];then
+ rcvbuf="$OPTARG"
+ else
+ echo "-R requires numeric argument, got \"$OPTARG\"" 1>&2
exit 1
fi
;;
@@ -342,8 +352,12 @@ do_transfer()
port=$((10000+$TEST_COUNT))
TEST_COUNT=$((TEST_COUNT+1))
+ if [ "$rcvbuf" -gt 0 ]; then
+ extra_args="$extra_args -R $rcvbuf"
+ fi
+
if [ "$sndbuf" -gt 0 ]; then
- extra_args="$extra_args -b $sndbuf"
+ extra_args="$extra_args -S $sndbuf"
fi
if [ -n "$testmode" ]; then
diff --git a/tools/testing/selftests/net/reuseaddr_ports_exhausted.c b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
new file mode 100644
index 000000000000..7b01b7c2ec10
--- /dev/null
+++ b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Check if we can fully utilize 4-tuples for connect().
+ *
+ * Rules to bind sockets to the same port when all ephemeral ports are
+ * exhausted.
+ *
+ * 1. if there are TCP_LISTEN sockets on the port, fail to bind.
+ * 2. if there are sockets without SO_REUSEADDR, fail to bind.
+ * 3. if SO_REUSEADDR is disabled, fail to bind.
+ * 4. if SO_REUSEADDR is enabled and SO_REUSEPORT is disabled,
+ * succeed to bind.
+ * 5. if SO_REUSEADDR and SO_REUSEPORT are enabled and
+ * there is no socket having the both options and the same EUID,
+ * succeed to bind.
+ * 6. fail to bind.
+ *
+ * Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
+ */
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include "../kselftest_harness.h"
+
+struct reuse_opts {
+ int reuseaddr[2];
+ int reuseport[2];
+};
+
+struct reuse_opts unreusable_opts[12] = {
+ {0, 0, 0, 0},
+ {0, 0, 0, 1},
+ {0, 0, 1, 0},
+ {0, 0, 1, 1},
+ {0, 1, 0, 0},
+ {0, 1, 0, 1},
+ {0, 1, 1, 0},
+ {0, 1, 1, 1},
+ {1, 0, 0, 0},
+ {1, 0, 0, 1},
+ {1, 0, 1, 0},
+ {1, 0, 1, 1},
+};
+
+struct reuse_opts reusable_opts[4] = {
+ {1, 1, 0, 0},
+ {1, 1, 0, 1},
+ {1, 1, 1, 0},
+ {1, 1, 1, 1},
+};
+
+int bind_port(struct __test_metadata *_metadata, int reuseaddr, int reuseport)
+{
+ struct sockaddr_in local_addr;
+ int len = sizeof(local_addr);
+ int fd, ret;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_NE(-1, fd) TH_LOG("failed to open socket.");
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, sizeof(int));
+ ASSERT_EQ(0, ret) TH_LOG("failed to setsockopt: SO_REUSEADDR.");
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuseport, sizeof(int));
+ ASSERT_EQ(0, ret) TH_LOG("failed to setsockopt: SO_REUSEPORT.");
+
+ local_addr.sin_family = AF_INET;
+ local_addr.sin_addr.s_addr = inet_addr("127.0.0.1");
+ local_addr.sin_port = 0;
+
+ if (bind(fd, (struct sockaddr *)&local_addr, len) == -1) {
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+TEST(reuseaddr_ports_exhausted_unreusable)
+{
+ struct reuse_opts *opts;
+ int i, j, fd[2];
+
+ for (i = 0; i < 12; i++) {
+ opts = &unreusable_opts[i];
+
+ for (j = 0; j < 2; j++)
+ fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);
+
+ ASSERT_NE(-1, fd[0]) TH_LOG("failed to bind.");
+ EXPECT_EQ(-1, fd[1]) TH_LOG("should fail to bind.");
+
+ for (j = 0; j < 2; j++)
+ if (fd[j] != -1)
+ close(fd[j]);
+ }
+}
+
+TEST(reuseaddr_ports_exhausted_reusable_same_euid)
+{
+ struct reuse_opts *opts;
+ int i, j, fd[2];
+
+ for (i = 0; i < 4; i++) {
+ opts = &reusable_opts[i];
+
+ for (j = 0; j < 2; j++)
+ fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);
+
+ ASSERT_NE(-1, fd[0]) TH_LOG("failed to bind.");
+
+ if (opts->reuseport[0] && opts->reuseport[1]) {
+ EXPECT_EQ(-1, fd[1]) TH_LOG("should fail to bind because both sockets succeed to be listened.");
+ } else {
+ EXPECT_NE(-1, fd[1]) TH_LOG("should succeed to bind to connect to different destinations.");
+ }
+
+ for (j = 0; j < 2; j++)
+ if (fd[j] != -1)
+ close(fd[j]);
+ }
+}
+
+TEST(reuseaddr_ports_exhausted_reusable_different_euid)
+{
+ struct reuse_opts *opts;
+ int i, j, ret, fd[2];
+ uid_t euid[2] = {10, 20};
+
+ for (i = 0; i < 4; i++) {
+ opts = &reusable_opts[i];
+
+ for (j = 0; j < 2; j++) {
+ ret = seteuid(euid[j]);
+ ASSERT_EQ(0, ret) TH_LOG("failed to seteuid: %d.", euid[j]);
+
+ fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);
+
+ ret = seteuid(0);
+ ASSERT_EQ(0, ret) TH_LOG("failed to seteuid: 0.");
+ }
+
+ ASSERT_NE(-1, fd[0]) TH_LOG("failed to bind.");
+ EXPECT_NE(-1, fd[1]) TH_LOG("should succeed to bind because one socket can be bound in each euid.");
+
+ if (fd[1] != -1) {
+ ret = listen(fd[0], 5);
+ ASSERT_EQ(0, ret) TH_LOG("failed to listen.");
+
+ ret = listen(fd[1], 5);
+ EXPECT_EQ(-1, ret) TH_LOG("should fail to listen because only one uid reserves the port in TCP_LISTEN.");
+ }
+
+ for (j = 0; j < 2; j++)
+ if (fd[j] != -1)
+ close(fd[j]);
+ }
+}
+
+TEST_HARNESS_MAIN
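
For contrast with the exhausted-port rules the test encodes, ordinary SO_REUSEPORT semantics allow two sockets owned by the same euid to bind the same explicit address and port. A minimal standalone demonstration of that baseline behavior; the selftest's rules only differ because bind() is asked to pick a port while the whole ephemeral range is occupied, which the companion script arranges via ip_local_port_range and ip_autobind_reuse:

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static int bound_socket(struct sockaddr_in *addr)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	if (bind(fd, (struct sockaddr *)addr, sizeof(*addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = inet_addr("127.0.0.1"),
		.sin_port = 0,	/* let the kernel pick a free port */
	};
	socklen_t len = sizeof(addr);
	int fd1, fd2;

	fd1 = bound_socket(&addr);
	getsockname(fd1, (struct sockaddr *)&addr, &len); /* learn the port */

	fd2 = bound_socket(&addr);	/* same port, both options, same euid */
	printf("second bind %s\n", fd2 >= 0 ? "succeeded" : "failed");
	return 0;
}
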
diff --git a/tools/testing/selftests/net/reuseaddr_ports_exhausted.sh b/tools/testing/selftests/net/reuseaddr_ports_exhausted.sh
new file mode 100755
index 000000000000..20e3a2913d06
--- /dev/null
+++ b/tools/testing/selftests/net/reuseaddr_ports_exhausted.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run tests when all ephemeral ports are exhausted.
+#
+# Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
+
+set +x
+set -e
+
+readonly NETNS="ns-$(mktemp -u XXXXXX)"
+
+setup() {
+ ip netns add "${NETNS}"
+ ip -netns "${NETNS}" link set lo up
+ ip netns exec "${NETNS}" \
+ sysctl -w net.ipv4.ip_local_port_range="32768 32768" \
+ > /dev/null 2>&1
+ ip netns exec "${NETNS}" \
+ sysctl -w net.ipv4.ip_autobind_reuse=1 > /dev/null 2>&1
+}
+
+cleanup() {
+ ip netns del "${NETNS}"
+}
+
+trap cleanup EXIT
+setup
+
+do_test() {
+ ip netns exec "${NETNS}" ./reuseaddr_ports_exhausted
+}
+
+do_test
+echo "tests done"
diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
index 3a779c084d96..39559d723c41 100644
--- a/tools/testing/selftests/pidfd/.gitignore
+++ b/tools/testing/selftests/pidfd/.gitignore
@@ -2,4 +2,5 @@ pidfd_open_test
pidfd_poll_test
pidfd_test
pidfd_wait
+pidfd_fdinfo_test
pidfd_getfd_test
diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
index 477bc61b374a..c33a7aac27ff 100644
--- a/tools/testing/selftests/tc-testing/config
+++ b/tools/testing/selftests/tc-testing/config
@@ -31,6 +31,7 @@ CONFIG_NET_EMATCH_U32=m
CONFIG_NET_EMATCH_META=m
CONFIG_NET_EMATCH_TEXT=m
CONFIG_NET_EMATCH_IPSET=m
+CONFIG_NET_EMATCH_CANID=m
CONFIG_NET_EMATCH_IPT=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
@@ -57,3 +58,9 @@ CONFIG_NET_IFE_SKBMARK=m
CONFIG_NET_IFE_SKBPRIO=m
CONFIG_NET_IFE_SKBTCINDEX=m
CONFIG_NET_SCH_FIFO=y
+CONFIG_NET_SCH_ETS=m
+
+#
+## Network testing
+#
+CONFIG_CAN=m
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
index afb9187b46a7..e788c114a484 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
@@ -1054,5 +1054,225 @@
"teardown": [
"$TC qdisc del dev $DEV1 ingress"
]
+ },
+ {
+ "id": "b2b6",
+ "name": "Add basic filter with canid ematch and single SFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 1)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(sff 0x1\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "f67f",
+ "name": "Add basic filter with canid ematch and single SFF with mask",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 0xaabb:0x00ff)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(sff 0x2BB:0xFF\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "bd5c",
+ "name": "Add basic filter with canid ematch and multiple SFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 1 sff 2 sff 3)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(sff 0x1 sff 0x2 sff 0x3\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "83c7",
+ "name": "Add basic filter with canid ematch and multiple SFF with masks",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 0xaa:0x01 sff 0xbb:0x02 sff 0xcc:0x03)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(sff 0xAA:0x1 sff 0xBB:0x2 sff 0xCC:0x3\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "a8f5",
+ "name": "Add basic filter with canid ematch and single EFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(eff 1)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0x1\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "98ae",
+ "name": "Add basic filter with canid ematch and single EFF with mask",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(eff 0xaabb:0xf1f1)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0xAABB:0xF1F1\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "6056",
+ "name": "Add basic filter with canid ematch and multiple EFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(eff 1 eff 2 eff 3)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0x1 eff 0x2 eff 0x3\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "d188",
+ "name": "Add basic filter with canid ematch and multiple EFF with masks",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(eff 0xaa:0x01 eff 0xbb:0x02 eff 0xcc:0x03)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0xAA:0x1 eff 0xBB:0x2 eff 0xCC:0x3\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "25d1",
+ "name": "Add basic filter with canid ematch and a combination of SFF/EFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 0x01 eff 0x02)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0x2 sff 0x1\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "b438",
+ "name": "Add basic filter with canid ematch and a combination of SFF/EFF with masks",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 0x01:0xf eff 0x02:0xf)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0x2:0xF sff 0x1:0xF\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
}
]
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index d65a0faa46d8..eda7b624eab8 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -742,9 +742,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
guest_enter_irqoff();
if (has_vhe()) {
- kvm_arm_vhe_guest_enter();
ret = kvm_vcpu_run_vhe(vcpu);
- kvm_arm_vhe_guest_exit();
} else {
ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
}
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h
index 204d210d01c2..cc94ccc68821 100644
--- a/virt/kvm/arm/trace.h
+++ b/virt/kvm/arm/trace.h
@@ -4,6 +4,7 @@
#include <kvm/arm_arch_timer.h>
#include <linux/tracepoint.h>
+#include <asm/kvm_arm.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm