Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig                                |   6
-rw-r--r--  drivers/pci/controller/dwc/Kconfig                 |  14
-rw-r--r--  drivers/pci/controller/dwc/Makefile                |   1
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c              |  92
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape.c        |  10
-rw-r--r--  drivers/pci/controller/dwc/pci-meson.c             | 592
-rw-r--r--  drivers/pci/controller/dwc/pcie-armada8k.c         |  16
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c    |   4
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c  |  40
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c       |   8
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h       |  28
-rw-r--r--  drivers/pci/controller/dwc/pcie-histb.c            |   2
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c             | 109
-rw-r--r--  drivers/pci/iov.c                                  |  48
-rw-r--r--  drivers/pci/p2pdma.c                               |  14
-rw-r--r--  drivers/pci/pci-driver.c                           |  27
-rw-r--r--  drivers/pci/pci.h                                  |   2
-rw-r--r--  drivers/pci/pcie/aspm.c                            |   6
-rw-r--r--  drivers/pci/pcie/portdrv.h                         |  16
-rw-r--r--  drivers/pci/quirks.c                               |  24
-rw-r--r--  drivers/pci/switch/switchtec.c                     | 154
21 files changed, 1029 insertions, 184 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2dcc30429e8b..ffe7895aa880 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -73,9 +73,9 @@ config PCI_PF_STUB
depends on PCI_IOV
help
Say Y or M here if you want to enable support for devices that
- require SR-IOV support, while at the same time the PF itself is
- not providing any actual services on the host itself such as
- storage or networking.
+ require SR-IOV support, while at the same time the PF (Physical
+ Function) itself is not providing any actual services on the
+ host itself such as storage or networking.
When in doubt, say N.
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 955068867c35..548c58223868 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -89,8 +89,8 @@ config PCI_EXYNOS
select PCIE_DW_HOST
config PCI_IMX6
- bool "Freescale i.MX6 PCIe controller"
- depends on SOC_IMX6Q || (ARM && COMPILE_TEST)
+ bool "Freescale i.MX6/7 PCIe controller"
+ depends on SOC_IMX6Q || SOC_IMX7D || (ARM && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
@@ -193,6 +193,16 @@ config PCIE_HISI_STB
help
Say Y here if you want PCIe controller support on HiSilicon STB SoCs
+config PCI_MESON
+ bool "MESON PCIe controller"
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCI controller support on Amlogic
+ SoCs. The PCI controller on Amlogic SoCs is based on DesignWare
+ hardware, so the driver re-uses the DesignWare core functions.
+
config PCIE_UNIPHIER
bool "Socionext UniPhier PCIe controllers"
depends on ARCH_UNIPHIER || COMPILE_TEST
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 688a0ec13121..7bcdcdf5024e 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
+obj-$(CONFIG_PCI_MESON) += pci-meson.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 2cbef2d7c207..25a2b7683e55 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -27,6 +27,8 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include "pcie-designware.h"
@@ -59,6 +61,11 @@ struct imx6_pcie {
u32 tx_swing_low;
int link_gen;
struct regulator *vpcie;
+
+ /* power domain for pcie */
+ struct device *pd_pcie;
+ /* power domain for pcie phy */
+ struct device *pd_pcie_phy;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -292,6 +299,43 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
return 1;
}
+static int imx6_pcie_attach_pd(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct device_link *link;
+
+ /* Do nothing when in a single power domain */
+ if (dev->pm_domain)
+ return 0;
+
+ imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+ if (IS_ERR(imx6_pcie->pd_pcie))
+ return PTR_ERR(imx6_pcie->pd_pcie);
+ link = device_link_add(dev, imx6_pcie->pd_pcie,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ return -EINVAL;
+ }
+
+ imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+ if (IS_ERR(imx6_pcie->pd_pcie_phy))
+ return PTR_ERR(imx6_pcie->pd_pcie_phy);
+
+ link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
struct device *dev = imx6_pcie->pci->dev;
@@ -773,8 +817,28 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
- reset_control_assert(imx6_pcie->turnoff_reset);
- reset_control_deassert(imx6_pcie->turnoff_reset);
+ struct device *dev = imx6_pcie->pci->dev;
+
+ /* Some variants have a turnoff reset in DT */
+ if (imx6_pcie->turnoff_reset) {
+ reset_control_assert(imx6_pcie->turnoff_reset);
+ reset_control_deassert(imx6_pcie->turnoff_reset);
+ goto pm_turnoff_sleep;
+ }
+
+ /* Others poke directly at IOMUXC registers */
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
+ break;
+ default:
+ dev_err(dev, "PME_Turn_Off not implemented\n");
+ return;
+ }
/*
* Components with an upstream port must respond to
@@ -783,6 +847,7 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
* The standard recommends a 1-10ms timeout after which to
* proceed anyway as if acks were received.
*/
+pm_turnoff_sleep:
usleep_range(1000, 10000);
}
@@ -792,18 +857,31 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
clk_disable_unprepare(imx6_pcie->pcie_phy);
clk_disable_unprepare(imx6_pcie->pcie_bus);
- if (imx6_pcie->variant == IMX7D) {
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ break;
+ case IMX7D:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ break;
+ default:
+ break;
}
}
+static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie)
+{
+ return (imx6_pcie->variant == IMX7D ||
+ imx6_pcie->variant == IMX6SX);
+}
+
static int imx6_pcie_suspend_noirq(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- if (imx6_pcie->variant != IMX7D)
+ if (!imx6_pcie_supports_suspend(imx6_pcie))
return 0;
imx6_pcie_pm_turnoff(imx6_pcie);
@@ -819,7 +897,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
struct pcie_port *pp = &imx6_pcie->pci->pp;
- if (imx6_pcie->variant != IMX7D)
+ if (!imx6_pcie_supports_suspend(imx6_pcie))
return 0;
imx6_pcie_assert_core_reset(imx6_pcie);
@@ -985,6 +1063,10 @@ static int imx6_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx6_pcie);
+ ret = imx6_pcie_attach_pd(dev);
+ if (ret)
+ return ret;
+
ret = imx6_add_pcie_port(imx6_pcie, pdev);
if (ret < 0)
return ret;
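A note on the device-link idiom in the pci-imx6.c hunks above: dev_pm_domain_attach_by_name() returns the attached virtual power-domain device (or an ERR_PTR such as -EPROBE_DEFER), while device_link_add() signals failure by returning NULL rather than an ERR_PTR. A minimal sketch of the attach-and-link step, factored into a hypothetical helper with simplified error handling (not the patch itself):

    static struct device *attach_one_pd(struct device *dev, const char *name)
    {
            struct device *pd = dev_pm_domain_attach_by_name(dev, name);
            struct device_link *link;

            if (IS_ERR_OR_NULL(pd))
                    return pd;

            /* device_link_add() returns NULL on failure, not an ERR_PTR */
            link = device_link_add(dev, pd, DL_FLAG_STATELESS |
                                   DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
            if (!link) {
                    dev_pm_domain_detach(pd, false);
                    return ERR_PTR(-EINVAL);
            }

            return pd;
    }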
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 3724d3ef7008..905a0ab0e6fa 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -222,12 +222,12 @@ static const struct dw_pcie_ops dw_ls_pcie_ops = {
.link_up = ls_pcie_link_up,
};
-static struct ls_pcie_drvdata ls1021_drvdata = {
+static const struct ls_pcie_drvdata ls1021_drvdata = {
.ops = &ls1021_pcie_host_ops,
.dw_pcie_ops = &dw_ls1021_pcie_ops,
};
-static struct ls_pcie_drvdata ls1043_drvdata = {
+static const struct ls_pcie_drvdata ls1043_drvdata = {
.lut_offset = 0x10000,
.ltssm_shift = 24,
.lut_dbg = 0x7fc,
@@ -235,7 +235,7 @@ static struct ls_pcie_drvdata ls1043_drvdata = {
.dw_pcie_ops = &dw_ls_pcie_ops,
};
-static struct ls_pcie_drvdata ls1046_drvdata = {
+static const struct ls_pcie_drvdata ls1046_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 24,
.lut_dbg = 0x407fc,
@@ -243,7 +243,7 @@ static struct ls_pcie_drvdata ls1046_drvdata = {
.dw_pcie_ops = &dw_ls_pcie_ops,
};
-static struct ls_pcie_drvdata ls2080_drvdata = {
+static const struct ls_pcie_drvdata ls2080_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 0,
.lut_dbg = 0x7fc,
@@ -251,7 +251,7 @@ static struct ls_pcie_drvdata ls2080_drvdata = {
.dw_pcie_ops = &dw_ls_pcie_ops,
};
-static struct ls_pcie_drvdata ls2088_drvdata = {
+static const struct ls_pcie_drvdata ls2088_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 0,
.lut_dbg = 0x407fc,
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
new file mode 100644
index 000000000000..241ebe0c4505
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -0,0 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amlogic MESON SoCs
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Yue Wang <yue.wang@amlogic.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_meson_pcie(x) dev_get_drvdata((x)->dev)
+
+/* External local bus interface registers */
+#define PLR_OFFSET 0x700
+#define PCIE_PORT_LINK_CTRL_OFF (PLR_OFFSET + 0x10)
+#define FAST_LINK_MODE BIT(7)
+#define LINK_CAPABLE_MASK GENMASK(21, 16)
+#define LINK_CAPABLE_X1 BIT(16)
+
+#define PCIE_GEN2_CTRL_OFF (PLR_OFFSET + 0x10c)
+#define NUM_OF_LANES_MASK GENMASK(12, 8)
+#define NUM_OF_LANES_X1 BIT(8)
+#define DIRECT_SPEED_CHANGE BIT(17)
+
+#define TYPE1_HDR_OFFSET 0x0
+#define PCIE_STATUS_COMMAND (TYPE1_HDR_OFFSET + 0x04)
+#define PCI_IO_EN BIT(0)
+#define PCI_MEM_SPACE_EN BIT(1)
+#define PCI_BUS_MASTER_EN BIT(2)
+
+#define PCIE_BASE_ADDR0 (TYPE1_HDR_OFFSET + 0x10)
+#define PCIE_BASE_ADDR1 (TYPE1_HDR_OFFSET + 0x14)
+
+#define PCIE_CAP_OFFSET 0x70
+#define PCIE_DEV_CTRL_DEV_STUS (PCIE_CAP_OFFSET + 0x08)
+#define PCIE_CAP_MAX_PAYLOAD_MASK GENMASK(7, 5)
+#define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5)
+#define PCIE_CAP_MAX_READ_REQ_MASK GENMASK(14, 12)
+#define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12)
+
+/* PCIe specific config registers */
+#define PCIE_CFG0 0x0
+#define APP_LTSSM_ENABLE BIT(7)
+
+#define PCIE_CFG_STATUS12 0x30
+#define IS_SMLH_LINK_UP(x) ((x) & (1 << 6))
+#define IS_RDLH_LINK_UP(x) ((x) & (1 << 16))
+#define IS_LTSSM_UP(x) ((((x) >> 10) & 0x1f) == 0x11)
+
+#define PCIE_CFG_STATUS17 0x44
+#define PM_CURRENT_STATE(x) (((x) >> 7) & 0x1)
+
+#define WAIT_LINKUP_TIMEOUT 4000
+#define PORT_CLK_RATE 100000000UL
+#define MAX_PAYLOAD_SIZE 256
+#define MAX_READ_REQ_SIZE 256
+#define MESON_PCIE_PHY_POWERUP 0x1c
+#define PCIE_RESET_DELAY 500
+#define PCIE_SHARED_RESET 1
+#define PCIE_NORMAL_RESET 0
+
+enum pcie_data_rate {
+ PCIE_GEN1,
+ PCIE_GEN2,
+ PCIE_GEN3,
+ PCIE_GEN4
+};
+
+struct meson_pcie_mem_res {
+ void __iomem *elbi_base;
+ void __iomem *cfg_base;
+ void __iomem *phy_base;
+};
+
+struct meson_pcie_clk_res {
+ struct clk *clk;
+ struct clk *mipi_gate;
+ struct clk *port_clk;
+ struct clk *general_clk;
+};
+
+struct meson_pcie_rc_reset {
+ struct reset_control *phy;
+ struct reset_control *port;
+ struct reset_control *apb;
+};
+
+struct meson_pcie {
+ struct dw_pcie pci;
+ struct meson_pcie_mem_res mem_res;
+ struct meson_pcie_clk_res clk_res;
+ struct meson_pcie_rc_reset mrst;
+ struct gpio_desc *reset_gpio;
+};
+
+static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
+ const char *id,
+ u32 reset_type)
+{
+ struct device *dev = mp->pci.dev;
+ struct reset_control *reset;
+
+ if (reset_type == PCIE_SHARED_RESET)
+ reset = devm_reset_control_get_shared(dev, id);
+ else
+ reset = devm_reset_control_get(dev, id);
+
+ return reset;
+}
+
+static int meson_pcie_get_resets(struct meson_pcie *mp)
+{
+ struct meson_pcie_rc_reset *mrst = &mp->mrst;
+
+ mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
+ if (IS_ERR(mrst->phy))
+ return PTR_ERR(mrst->phy);
+ reset_control_deassert(mrst->phy);
+
+ mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
+ if (IS_ERR(mrst->port))
+ return PTR_ERR(mrst->port);
+ reset_control_deassert(mrst->port);
+
+ mrst->apb = meson_pcie_get_reset(mp, "apb", PCIE_SHARED_RESET);
+ if (IS_ERR(mrst->apb))
+ return PTR_ERR(mrst->apb);
+ reset_control_deassert(mrst->apb);
+
+ return 0;
+}
+
+static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
+ struct meson_pcie *mp,
+ const char *id)
+{
+ struct device *dev = mp->pci.dev;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
+
+ return devm_ioremap_resource(dev, res);
+}
+
+static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev,
+ struct meson_pcie *mp,
+ const char *id)
+{
+ struct device *dev = mp->pci.dev;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
+ if (!res) {
+ dev_err(dev, "No REG resource %s\n", id);
+ return ERR_PTR(-ENXIO);
+ }
+
+ return devm_ioremap(dev, res->start, resource_size(res));
+}
+
+static int meson_pcie_get_mems(struct platform_device *pdev,
+ struct meson_pcie *mp)
+{
+ mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi");
+ if (IS_ERR(mp->mem_res.elbi_base))
+ return PTR_ERR(mp->mem_res.elbi_base);
+
+ mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg");
+ if (IS_ERR(mp->mem_res.cfg_base))
+ return PTR_ERR(mp->mem_res.cfg_base);
+
+ /* Meson SoCs have two PCI controllers that use the same PHY register */
+ mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy");
+ if (IS_ERR(mp->mem_res.phy_base))
+ return PTR_ERR(mp->mem_res.phy_base);
+
+ return 0;
+}
+
+static void meson_pcie_power_on(struct meson_pcie *mp)
+{
+ writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+}
+
+static void meson_pcie_reset(struct meson_pcie *mp)
+{
+ struct meson_pcie_rc_reset *mrst = &mp->mrst;
+
+ reset_control_assert(mrst->phy);
+ udelay(PCIE_RESET_DELAY);
+ reset_control_deassert(mrst->phy);
+ udelay(PCIE_RESET_DELAY);
+
+ reset_control_assert(mrst->port);
+ reset_control_assert(mrst->apb);
+ udelay(PCIE_RESET_DELAY);
+ reset_control_deassert(mrst->port);
+ reset_control_deassert(mrst->apb);
+ udelay(PCIE_RESET_DELAY);
+}
+
+static inline struct clk *meson_pcie_probe_clock(struct device *dev,
+ const char *id, u64 rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ if (rate) {
+ ret = clk_set_rate(clk, rate);
+ if (ret) {
+ dev_err(dev, "set clk rate failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clk\n");
+ return ERR_PTR(ret);
+ }
+
+ devm_add_action_or_reset(dev,
+ (void (*) (void *))clk_disable_unprepare,
+ clk);
+
+ return clk;
+}
+
+static int meson_pcie_probe_clocks(struct meson_pcie *mp)
+{
+ struct device *dev = mp->pci.dev;
+ struct meson_pcie_clk_res *res = &mp->clk_res;
+
+ res->port_clk = meson_pcie_probe_clock(dev, "port", PORT_CLK_RATE);
+ if (IS_ERR(res->port_clk))
+ return PTR_ERR(res->port_clk);
+
+ res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0);
+ if (IS_ERR(res->mipi_gate))
+ return PTR_ERR(res->mipi_gate);
+
+ res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0);
+ if (IS_ERR(res->general_clk))
+ return PTR_ERR(res->general_clk);
+
+ res->clk = meson_pcie_probe_clock(dev, "pcie", 0);
+ if (IS_ERR(res->clk))
+ return PTR_ERR(res->clk);
+
+ return 0;
+}
+
+static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg)
+{
+ writel(val, mp->mem_res.elbi_base + reg);
+}
+
+static inline u32 meson_elb_readl(struct meson_pcie *mp, u32 reg)
+{
+ return readl(mp->mem_res.elbi_base + reg);
+}
+
+static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
+{
+ return readl(mp->mem_res.cfg_base + reg);
+}
+
+static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
+{
+ writel(val, mp->mem_res.cfg_base + reg);
+}
+
+static void meson_pcie_assert_reset(struct meson_pcie *mp)
+{
+ gpiod_set_value_cansleep(mp->reset_gpio, 0);
+ udelay(500);
+ gpiod_set_value_cansleep(mp->reset_gpio, 1);
+}
+
+static void meson_pcie_init_dw(struct meson_pcie *mp)
+{
+ u32 val;
+
+ val = meson_cfg_readl(mp, PCIE_CFG0);
+ val |= APP_LTSSM_ENABLE;
+ meson_cfg_writel(mp, val, PCIE_CFG0);
+
+ val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
+ val &= ~LINK_CAPABLE_MASK;
+ meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
+
+ val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
+ val |= LINK_CAPABLE_X1 | FAST_LINK_MODE;
+ meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
+
+ val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
+ val &= ~NUM_OF_LANES_MASK;
+ meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
+
+ val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
+ val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE;
+ meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
+
+ meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0);
+ meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1);
+}
+
+static int meson_size_to_payload(struct meson_pcie *mp, int size)
+{
+ struct device *dev = mp->pci.dev;
+
+ /*
+ * The DWC core encodes payload size as 2^(val + 7) bytes, where val
+ * ranges from 0 to 5 and defaults to 1. If the requested size is not a
+ * power of two, or is below 2^7 or above 2^12, fall back to the default
+ * of 2^(1 + 7) = 256 bytes.
+ */
+ if (!is_power_of_2(size) || size < 128 || size > 4096) {
+ dev_warn(dev, "payload size %d, set to default 256\n", size);
+ return 1;
+ }
+
+ return fls(size) - 8;
+}
+
+static void meson_set_max_payload(struct meson_pcie *mp, int size)
+{
+ u32 val;
+ int max_payload_size = meson_size_to_payload(mp, size);
+
+ val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val &= ~PCIE_CAP_MAX_PAYLOAD_MASK;
+ meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+
+ val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
+ meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+}
+
+static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
+{
+ u32 val;
+ int max_rd_req_size = meson_size_to_payload(mp, size);
+
+ val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val &= ~PCIE_CAP_MAX_READ_REQ_MASK;
+ meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+
+ val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
+ meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+}
+
+static inline void meson_enable_memory_space(struct meson_pcie *mp)
+{
+ /* Set the RC Bus Master, Memory Space and I/O Space enables */
+ meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN,
+ PCIE_STATUS_COMMAND);
+}
+
+static int meson_pcie_establish_link(struct meson_pcie *mp)
+{
+ struct dw_pcie *pci = &mp->pci;
+ struct pcie_port *pp = &pci->pp;
+
+ meson_pcie_init_dw(mp);
+ meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
+ meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
+
+ dw_pcie_setup_rc(pp);
+ meson_enable_memory_space(mp);
+
+ meson_pcie_assert_reset(mp);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static void meson_pcie_enable_interrupts(struct meson_pcie *mp)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(&mp->pci.pp);
+}
+
+static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ ret = dw_pcie_read(pci->dbi_base + where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ /*
+ * There is a bug in the MESON AXG PCIe controller whereby software
+ * cannot program the PCI_CLASS_DEVICE register, so we must fabricate
+ * the return value in the config accessors.
+ */
+ if (where == PCI_CLASS_REVISION && size == 4)
+ *val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff);
+ else if (where == PCI_CLASS_DEVICE && size == 2)
+ *val = PCI_CLASS_BRIDGE_PCI;
+ else if (where == PCI_CLASS_DEVICE && size == 1)
+ *val = PCI_CLASS_BRIDGE_PCI & 0xff;
+ else if (where == PCI_CLASS_DEVICE + 1 && size == 1)
+ *val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where,
+ int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ return dw_pcie_write(pci->dbi_base + where, size, val);
+}
+
+static int meson_pcie_link_up(struct dw_pcie *pci)
+{
+ struct meson_pcie *mp = to_meson_pcie(pci);
+ struct device *dev = pci->dev;
+ u32 speed_okay = 0;
+ u32 cnt = 0;
+ u32 state12, state17, smlh_up, ltssm_up, rdlh_up;
+
+ do {
+ state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
+ state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17);
+ smlh_up = IS_SMLH_LINK_UP(state12);
+ rdlh_up = IS_RDLH_LINK_UP(state12);
+ ltssm_up = IS_LTSSM_UP(state12);
+
+ if (PM_CURRENT_STATE(state17) < PCIE_GEN3)
+ speed_okay = 1;
+
+ if (smlh_up)
+ dev_dbg(dev, "smlh_link_up is on\n");
+ if (rdlh_up)
+ dev_dbg(dev, "rdlh_link_up is on\n");
+ if (ltssm_up)
+ dev_dbg(dev, "ltssm_up is on\n");
+ if (speed_okay)
+ dev_dbg(dev, "speed_okay\n");
+
+ if (smlh_up && rdlh_up && ltssm_up && speed_okay)
+ return 1;
+
+ cnt++;
+
+ udelay(10);
+ } while (cnt < WAIT_LINKUP_TIMEOUT);
+
+ dev_err(dev, "error: wait linkup timeout\n");
+ return 0;
+}
+
+static int meson_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct meson_pcie *mp = to_meson_pcie(pci);
+ int ret;
+
+ ret = meson_pcie_establish_link(mp);
+ if (ret)
+ return ret;
+
+ meson_pcie_enable_interrupts(mp);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops meson_pcie_host_ops = {
+ .rd_own_conf = meson_pcie_rd_own_conf,
+ .wr_own_conf = meson_pcie_wr_own_conf,
+ .host_init = meson_pcie_host_init,
+};
+
+static int meson_add_pcie_port(struct meson_pcie *mp,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &mp->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq(pdev, 0);
+ if (pp->msi_irq < 0) {
+ dev_err(dev, "failed to get MSI IRQ\n");
+ return pp->msi_irq;
+ }
+ }
+
+ pp->ops = &meson_pcie_host_ops;
+ pci->dbi_base = mp->mem_res.elbi_base;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = meson_pcie_link_up,
+};
+
+static int meson_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct meson_pcie *mp;
+ int ret;
+
+ mp = devm_kzalloc(dev, sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ return -ENOMEM;
+
+ pci = &mp->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(mp->reset_gpio)) {
+ dev_err(dev, "get reset gpio failed\n");
+ return PTR_ERR(mp->reset_gpio);
+ }
+
+ ret = meson_pcie_get_resets(mp);
+ if (ret) {
+ dev_err(dev, "get reset resource failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_get_mems(pdev, mp);
+ if (ret) {
+ dev_err(dev, "get memory resource failed, %d\n", ret);
+ return ret;
+ }
+
+ meson_pcie_power_on(mp);
+ meson_pcie_reset(mp);
+
+ ret = meson_pcie_probe_clocks(mp);
+ if (ret) {
+ dev_err(dev, "init clock resources failed, %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, mp);
+
+ ret = meson_add_pcie_port(mp, pdev);
+ if (ret < 0) {
+ dev_err(dev, "Add PCIe port failed, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id meson_pcie_of_match[] = {
+ {
+ .compatible = "amlogic,axg-pcie",
+ },
+ {},
+};
+
+static struct platform_driver meson_pcie_driver = {
+ .probe = meson_pcie_probe,
+ .driver = {
+ .name = "meson-pcie",
+ .of_match_table = meson_pcie_of_match,
+ },
+};
+
+builtin_platform_driver(meson_pcie_driver);
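Worked example of the encoding used by meson_size_to_payload() above: the Device Control field value v encodes a size of 2^(v + 7) bytes, so the driver's requested size of 256 maps to v = fls(256) - 8 = 9 - 8 = 1. A minimal sketch with the values this driver uses:

    int size = 256;            /* MAX_PAYLOAD_SIZE / MAX_READ_REQ_SIZE above   */
    int v = fls(size) - 8;     /* fls(256) = 9, so v = 1 -> 2^(1+7) = 256 bytes */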
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 0c389a30ef5d..b171b6bc15c8 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -22,6 +22,7 @@
#include <linux/resource.h>
#include <linux/of_pci.h>
#include <linux/of_irq.h>
+#include <linux/gpio/consumer.h>
#include "pcie-designware.h"
@@ -29,6 +30,7 @@ struct armada8k_pcie {
struct dw_pcie *pci;
struct clk *clk;
struct clk *clk_reg;
+ struct gpio_desc *reset_gpio;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -137,6 +139,12 @@ static int armada8k_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
+ if (pcie->reset_gpio) {
+ /* assert and then deassert the reset signal */
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ msleep(100);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+ }
dw_pcie_setup_rc(pp);
armada8k_pcie_establish_link(pcie);
@@ -249,6 +257,14 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
goto fail_clkreg;
}
+ /* Get reset gpio signal and hold asserted (logically high) */
+ pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset_gpio)) {
+ ret = PTR_ERR(pcie->reset_gpio);
+ goto fail_clkreg;
+ }
+
platform_set_drvdata(pdev, pcie);
ret = armada8k_add_pcie_port(pcie, pdev);
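The reset GPIO handling above relies on devm_gpiod_get_optional() returning NULL when no "reset" GPIO is described in the firmware, so host init only has to test the pointer; an ERR_PTR (e.g. -EPROBE_DEFER) is still treated as a real error. A minimal sketch of that idiom, outside this driver's exact code:

    gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
    if (IS_ERR(gpio))
            return PTR_ERR(gpio);          /* real error, e.g. -EPROBE_DEFER   */
    if (gpio) {                            /* NULL means no reset GPIO wired up */
            gpiod_set_value_cansleep(gpio, 1);     /* assert                    */
            msleep(100);
            gpiod_set_value_cansleep(gpio, 0);     /* deassert                  */
    }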
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1e7b02221eac..880210366e71 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -504,6 +504,10 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
return -EINVAL;
}
+ if (pci->iatu_unroll_enabled && !pci->atu_base) {
+ dev_err(dev, "atu_base is not populated\n");
+ return -EINVAL;
+ }
ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
if (ret < 0) {
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 29a05759a294..721d60a5d9e4 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -99,9 +99,6 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
(i * MAX_MSI_IRQS_PER_CTRL) +
pos);
generic_handle_irq(irq);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
- (i * MSI_REG_CTRL_BLOCK_SIZE),
- 4, 1 << pos);
pos++;
}
}
@@ -168,8 +165,8 @@ static void dw_pci_bottom_mask(struct irq_data *data)
bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] &= ~(1 << bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
- pp->irq_status[ctrl]);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+ ~pp->irq_status[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -191,8 +188,8 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] |= 1 << bit;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
- pp->irq_status[ctrl]);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+ ~pp->irq_status[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -200,13 +197,22 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
static void dw_pci_bottom_ack(struct irq_data *d)
{
- struct msi_desc *msi = irq_data_get_msi_desc(d);
- struct pcie_port *pp;
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
- pp = msi_desc_to_pci_sysdata(msi);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);
if (pp->ops->msi_irq_ack)
pp->ops->msi_irq_ack(d->hwirq, pp);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -658,10 +664,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
/* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++)
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, ~0);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, &pp->irq_status[ctrl]);
+ 4, ~0);
+ pp->irq_status[ctrl] = 0;
+ }
/* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
@@ -699,6 +710,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dev_dbg(pci->dev, "iATU unroll: %s\n",
pci->iatu_unroll_enabled ? "enabled" : "disabled");
+ if (pci->iatu_unroll_enabled && !pci->atu_base)
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
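With this change the three MSI controller registers take on distinct roles: PCIE_MSI_INTR0_ENABLE is programmed once at setup to enable every vector, PCIE_MSI_INTR0_MASK is what the irq_chip mask/unmask callbacks toggle (a set bit masks that vector, hence writing ~irq_status), and PCIE_MSI_INTR0_STATUS is write-one-to-clear and is now acked from the ack callback instead of the demux loop. A minimal sketch of the per-vector arithmetic those callbacks share, using the names from the hunks above:

    ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;        /* which 32-vector block  */
    res  = ctrl * MSI_REG_CTRL_BLOCK_SIZE;       /* register block offset  */
    bit  = hwirq % MAX_MSI_IRQS_PER_CTRL;        /* bit within that block  */

    dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, ~pp->irq_status[ctrl]);  /* mask/unmask */
    dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);             /* ack (W1C)   */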
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 2153956a0b20..93ef8c31fb39 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -93,7 +93,7 @@ static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
- return dw_pcie_readl_dbi(pci, offset + reg);
+ return dw_pcie_readl_atu(pci, offset + reg);
}
static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
@@ -101,7 +101,7 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
- dw_pcie_writel_dbi(pci, offset + reg, val);
+ dw_pcie_writel_atu(pci, offset + reg, val);
}
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
@@ -187,7 +187,7 @@ static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
- return dw_pcie_readl_dbi(pci, offset + reg);
+ return dw_pcie_readl_atu(pci, offset + reg);
}
static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
@@ -195,7 +195,7 @@ static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
{
u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
- dw_pcie_writel_dbi(pci, offset + reg, val);
+ dw_pcie_writel_atu(pci, offset + reg, val);
}
static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 0989d880ac46..9943d8c68335 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -92,12 +92,20 @@
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
#define PCIE_ATU_UNR_UPPER_TARGET 0x18
+/*
+ * The default address offset between dbi_base and atu_base. Root controller
+ * drivers are not required to initialize atu_base if the offset matches this
+ * default; the driver core automatically derives atu_base from dbi_base using
+ * this offset when atu_base is not set.
+ */
+#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
+
/* Register address builder */
-#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
- ((0x3 << 20) | ((region) << 9))
+#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
+ ((region) << 9)
-#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
- ((0x3 << 20) | ((region) << 9) | (0x1 << 8))
+#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
+ (((region) << 9) | (0x1 << 8))
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
@@ -219,6 +227,8 @@ struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
void __iomem *dbi_base2;
+ /* Used when iatu_unroll_enabled is true */
+ void __iomem *atu_base;
u32 num_viewport;
u8 iatu_unroll_enabled;
struct pcie_port pp;
@@ -289,6 +299,16 @@ static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
}
+static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
+{
+ __dw_pcie_write_dbi(pci, pci->atu_base, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
+{
+ return __dw_pcie_read_dbi(pci, pci->atu_base, reg, 0x4);
+}
+
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
{
u32 reg;
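Taken together, the accessor and offset changes above leave the iATU "unroll" registers at the same bus address as before whenever the platform driver does not provide atu_base. An illustrative sketch of the equivalence, ignoring the ops->read_dbi indirection used by the real accessors:

    /* Before: unroll registers were reached through the DBI window */
    val = readl(pci->dbi_base + (0x3 << 20) + (region << 9) + reg);

    /* After: reached through atu_base, which defaults to the old offset */
    if (!pci->atu_base)
            pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;   /* 0x3 << 20 */
    val = readl(pci->atu_base + (region << 9) + reg);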
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 7b32e619b959..954bc2b74bbc 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -202,7 +202,7 @@ static int histb_pcie_host_init(struct pcie_port *pp)
return 0;
}
-static struct dw_pcie_host_ops histb_pcie_host_ops = {
+static const struct dw_pcie_host_ops histb_pcie_host_ops = {
.rd_own_conf = histb_pcie_rd_own_conf,
.wr_own_conf = histb_pcie_wr_own_conf,
.host_init = histb_pcie_host_init,
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index d069a76cbb95..55e471c18e8d 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -161,7 +161,6 @@ struct mtk_pcie_soc {
* @obff_ck: pointer to OBFF functional block operating clock
* @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
* @phy: pointer to PHY control block
- * @lane: lane count
* @slot: port slot
* @irq: GIC irq
* @irq_domain: legacy INTx IRQ domain
@@ -182,7 +181,6 @@ struct mtk_pcie_port {
struct clk *obff_ck;
struct clk *pipe_ck;
struct phy *phy;
- u32 lane;
u32 slot;
int irq;
struct irq_domain *irq_domain;
@@ -197,29 +195,20 @@ struct mtk_pcie_port {
* @dev: pointer to PCIe device
* @base: IO mapped register base
* @free_ck: free-run reference clock
- * @io: IO resource
- * @pio: PIO resource
* @mem: non-prefetchable memory resource
- * @busn: bus range
- * @offset: IO / Memory offset
* @ports: pointer to PCIe port information
* @soc: pointer to SoC-dependent operations
+ * @busnr: root bus number
*/
struct mtk_pcie {
struct device *dev;
void __iomem *base;
struct clk *free_ck;
- struct resource io;
- struct resource pio;
struct resource mem;
- struct resource busn;
- struct {
- resource_size_t mem;
- resource_size_t io;
- } offset;
struct list_head ports;
const struct mtk_pcie_soc *soc;
+ unsigned int busnr;
};
static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
@@ -904,12 +893,6 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
if (!port)
return -ENOMEM;
- err = of_property_read_u32(node, "num-lanes", &port->lane);
- if (err) {
- dev_err(dev, "missing num-lanes property\n");
- return err;
- }
-
snprintf(name, sizeof(name), "port%d", slot);
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
port->base = devm_ioremap_resource(dev, regs);
@@ -1045,55 +1028,43 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node, *child;
- struct of_pci_range_parser parser;
- struct of_pci_range range;
- struct resource res;
struct mtk_pcie_port *port, *tmp;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct list_head *windows = &host->windows;
+ struct resource_entry *win, *tmp_win;
+ resource_size_t io_base;
int err;
- if (of_pci_range_parser_init(&parser, node)) {
- dev_err(dev, "missing \"ranges\" property\n");
- return -EINVAL;
- }
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ windows, &io_base);
+ if (err)
+ return err;
- for_each_of_pci_range(&parser, &range) {
- err = of_pci_range_to_resource(&range, node, &res);
- if (err < 0)
- return err;
+ err = devm_request_pci_bus_resources(dev, windows);
+ if (err < 0)
+ return err;
- switch (res.flags & IORESOURCE_TYPE_BITS) {
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry_safe(win, tmp_win, windows) {
+ switch (resource_type(win->res)) {
case IORESOURCE_IO:
- pcie->offset.io = res.start - range.pci_addr;
-
- memcpy(&pcie->pio, &res, sizeof(res));
- pcie->pio.name = node->full_name;
-
- pcie->io.start = range.cpu_addr;
- pcie->io.end = range.cpu_addr + range.size - 1;
- pcie->io.flags = IORESOURCE_MEM;
- pcie->io.name = "I/O";
-
- memcpy(&res, &pcie->io, sizeof(res));
+ err = devm_pci_remap_iospace(dev, win->res, io_base);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, win->res);
+ resource_list_destroy_entry(win);
+ }
break;
-
case IORESOURCE_MEM:
- pcie->offset.mem = res.start - range.pci_addr;
-
- memcpy(&pcie->mem, &res, sizeof(res));
+ memcpy(&pcie->mem, win->res, sizeof(*win->res));
pcie->mem.name = "non-prefetchable";
break;
+ case IORESOURCE_BUS:
+ pcie->busnr = win->res->start;
+ break;
}
}
- err = of_pci_parse_bus_range(node, &pcie->busn);
- if (err < 0) {
- dev_err(dev, "failed to parse bus ranges property: %d\n", err);
- pcie->busn.name = node->name;
- pcie->busn.start = 0;
- pcie->busn.end = 0xff;
- pcie->busn.flags = IORESOURCE_BUS;
- }
-
for_each_available_child_of_node(node, child) {
int slot;
@@ -1125,28 +1096,6 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
return 0;
}
-static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
-{
- struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
- struct list_head *windows = &host->windows;
- struct device *dev = pcie->dev;
- int err;
-
- pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
- pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
- pci_add_resource(windows, &pcie->busn);
-
- err = devm_request_pci_bus_resources(dev, windows);
- if (err < 0)
- return err;
-
- err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
- if (err)
- return err;
-
- return 0;
-}
-
static int mtk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1169,11 +1118,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
if (err)
return err;
- err = mtk_pcie_request_resources(pcie);
- if (err)
- goto put_resources;
-
- host->busnr = pcie->busn.start;
+ host->busnr = pcie->busnr;
host->dev.parent = pcie->dev;
host->ops = pcie->soc->ops;
host->map_irq = of_irq_parse_and_map_pci;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 9616eca3182f..3aa115ed3a65 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -252,6 +252,27 @@ int __weak pcibios_sriov_disable(struct pci_dev *pdev)
return 0;
}
+static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
+{
+ unsigned int i;
+ int rc;
+
+ if (dev->no_vf_scan)
+ return 0;
+
+ for (i = 0; i < num_vfs; i++) {
+ rc = pci_iov_add_virtfn(dev, i);
+ if (rc)
+ goto failed;
+ }
+ return 0;
+failed:
+ while (i--)
+ pci_iov_remove_virtfn(dev, i);
+
+ return rc;
+}
+
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
int rc;
@@ -337,21 +358,15 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
msleep(100);
pci_cfg_access_unlock(dev);
- for (i = 0; i < initial; i++) {
- rc = pci_iov_add_virtfn(dev, i);
- if (rc)
- goto failed;
- }
+ rc = sriov_add_vfs(dev, initial);
+ if (rc)
+ goto err_pcibios;
kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
iov->num_VFs = nr_virtfn;
return 0;
-failed:
- while (i--)
- pci_iov_remove_virtfn(dev, i);
-
err_pcibios:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
@@ -368,17 +383,26 @@ err_pcibios:
return rc;
}
-static void sriov_disable(struct pci_dev *dev)
+static void sriov_del_vfs(struct pci_dev *dev)
{
- int i;
struct pci_sriov *iov = dev->sriov;
+ int i;
- if (!iov->num_VFs)
+ if (dev->no_vf_scan)
return;
for (i = 0; i < iov->num_VFs; i++)
pci_iov_remove_virtfn(dev, i);
+}
+
+static void sriov_disable(struct pci_dev *dev)
+{
+ struct pci_sriov *iov = dev->sriov;
+
+ if (!iov->num_VFs)
+ return;
+ sriov_del_vfs(dev);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index ae3c5b25dcc7..c25a69fc4fc7 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -422,7 +422,7 @@ static int upstream_bridge_distance_warn(struct pci_dev *provider,
*
* Returns -1 if any of the clients are not compatible (behind the same
* root port as the provider), otherwise returns a positive number where
- * a lower number is the preferrable choice. (If there's one client
+ * a lower number is the preferable choice. (If there's one client
* that's the same as the provider it will return 0, which is best choice).
*
* For now, "compatible" means the provider and the clients are all behind
@@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(pci_has_p2pmem);
* @num_clients: number of client devices in the list
*
* If multiple devices are behind the same switch, the one "closest" to the
- * client devices in use will be chosen first. (So if one of the providers are
+ * client devices in use will be chosen first. (So if one of the providers is
* the same as one of the clients, that provider will be used ahead of any
* other providers that are unrelated). If multiple providers are an equal
* distance away, one will be chosen at random.
@@ -580,7 +580,7 @@ EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
* pci_free_p2pmem - free peer-to-peer DMA memory
* @pdev: the device the memory was allocated from
* @addr: address of the memory that was allocated
- * @size: number of bytes that was allocated
+ * @size: number of bytes that were allocated
*/
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
@@ -617,7 +617,7 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
* @nents: the number of SG entries in the list
* @length: number of bytes to allocate
*
- * Returns 0 on success
+ * Return: %NULL on error or &struct scatterlist pointer and @nents on success
*/
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
unsigned int *nents, u32 length)
@@ -673,7 +673,7 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);
*
* Published memory can be used by other PCI device drivers for
* peer-2-peer DMA operations. Non-published memory is reserved for
- * exlusive use of the device driver that registers the peer-to-peer
+ * exclusive use of the device driver that registers the peer-to-peer
* memory.
*/
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
@@ -733,7 +733,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg);
* @use_p2pdma: returns whether to enable p2pdma or not
*
* Parses an attribute value to decide whether to enable p2pdma.
- * The value can select a PCI device (using it's full BDF device
+ * The value can select a PCI device (using its full BDF device
* name) or a boolean (in any format strtobool() accepts). A false
* value disables p2pdma, a true value expects the caller
* to automatically find a compatible device and specifying a PCI device
@@ -784,7 +784,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);
* whether p2pdma is enabled
* @page: contents of the stored value
* @p2p_dev: the selected p2p device (NULL if no device is selected)
- * @use_p2pdma: whether p2pdme has been enabled
+ * @use_p2pdma: whether p2pdma has been enabled
*
* Attributes that use pci_p2pdma_enable_store() should use this function
* to show the value of the attribute.
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index bef17c3fca67..33f3f475e5c6 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1251,30 +1251,29 @@ static int pci_pm_runtime_suspend(struct device *dev)
return 0;
}
- if (!pm || !pm->runtime_suspend)
- return -ENOSYS;
-
pci_dev->state_saved = false;
- error = pm->runtime_suspend(dev);
- if (error) {
+ if (pm && pm->runtime_suspend) {
+ error = pm->runtime_suspend(dev);
/*
* -EBUSY and -EAGAIN is used to request the runtime PM core
* to schedule a new suspend, so log the event only with debug
* log level.
*/
- if (error == -EBUSY || error == -EAGAIN)
+ if (error == -EBUSY || error == -EAGAIN) {
dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
pm->runtime_suspend, error);
- else
+ return error;
+ } else if (error) {
dev_err(dev, "can't suspend (%pf returned %d)\n",
pm->runtime_suspend, error);
-
- return error;
+ return error;
+ }
}
pci_fixup_device(pci_fixup_suspend, pci_dev);
- if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+ if (pm && pm->runtime_suspend
+ && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
WARN_ONCE(pci_dev->current_state != prev,
"PCI PM: State of device not saved by %pF\n",
@@ -1292,7 +1291,7 @@ static int pci_pm_runtime_suspend(struct device *dev)
static int pci_pm_runtime_resume(struct device *dev)
{
- int rc;
+ int rc = 0;
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
@@ -1306,14 +1305,12 @@ static int pci_pm_runtime_resume(struct device *dev)
if (!pci_dev->driver)
return 0;
- if (!pm || !pm->runtime_resume)
- return -ENOSYS;
-
pci_fixup_device(pci_fixup_resume_early, pci_dev);
pci_enable_wake(pci_dev, PCI_D0, false);
pci_fixup_device(pci_fixup_resume, pci_dev);
- rc = pm->runtime_resume(dev);
+ if (pm && pm->runtime_resume)
+ rc = pm->runtime_resume(dev);
pci_dev->runtime_d3cold = false;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 662b7457db23..224d88634115 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -2,6 +2,8 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
+#include <linux/pci.h>
+
#define PCI_FIND_CAP_TTL 48
#define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index dcb29cb76dc6..2330ebf6df72 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -53,8 +53,6 @@ struct pcie_link_state {
struct pcie_link_state *root; /* pointer to the root port link */
struct pcie_link_state *parent; /* pointer to the parent Link state */
struct list_head sibling; /* node in link_list */
- struct list_head children; /* list of child link states */
- struct list_head link; /* node in parent's children list */
/* ASPM state */
u32 aspm_support:7; /* Supported ASPM state */
@@ -850,8 +848,6 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
return NULL;
INIT_LIST_HEAD(&link->sibling);
- INIT_LIST_HEAD(&link->children);
- INIT_LIST_HEAD(&link->link);
link->pdev = pdev;
link->downstream = pci_function_0(pdev->subordinate);
@@ -877,7 +873,6 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
link->parent = parent;
link->root = link->parent->root;
- list_add(&link->link, &parent->children);
}
list_add(&link->sibling, &link_list);
@@ -1001,7 +996,6 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
/* All functions are removed, so just disable ASPM for the link */
pcie_config_aspm_link(link, 0);
list_del(&link->sibling);
- list_del(&link->link);
/* Clock PM is for endpoint device */
free_link_state(link);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index e495f04394d0..fbbf00b0992e 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -71,19 +71,19 @@ static inline void *get_service_data(struct pcie_device *dev)
struct pcie_port_service_driver {
const char *name;
- int (*probe) (struct pcie_device *dev);
- void (*remove) (struct pcie_device *dev);
- int (*suspend) (struct pcie_device *dev);
- int (*resume_noirq) (struct pcie_device *dev);
- int (*resume) (struct pcie_device *dev);
- int (*runtime_suspend) (struct pcie_device *dev);
- int (*runtime_resume) (struct pcie_device *dev);
+ int (*probe)(struct pcie_device *dev);
+ void (*remove)(struct pcie_device *dev);
+ int (*suspend)(struct pcie_device *dev);
+ int (*resume_noirq)(struct pcie_device *dev);
+ int (*resume)(struct pcie_device *dev);
+ int (*runtime_suspend)(struct pcie_device *dev);
+ int (*runtime_resume)(struct pcie_device *dev);
/* Device driver may resume normal operations */
void (*error_resume)(struct pci_dev *dev);
/* Link Reset Capability - AER service driver specific */
- pci_ers_result_t (*reset_link) (struct pci_dev *dev);
+ pci_ers_result_t (*reset_link)(struct pci_dev *dev);
int port_type; /* Type of the port this driver can handle */
u32 service; /* Port service this device represents */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4700d24e5d55..b0a413f3f7ca 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -619,6 +619,30 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
quirk_amd_nl_class);
/*
+ * The Synopsys USB 3.x host HAPS platform has a class code of
+ * PCI_CLASS_SERIAL_USB_XHCI, so the xhci driver can claim it. However,
+ * these devices should use the dwc3-haps driver. Change their class code
+ * to PCI_CLASS_SERIAL_USB_DEVICE to prevent the xhci-pci driver from
+ * claiming them.
+ */
+static void quirk_synopsys_haps(struct pci_dev *pdev)
+{
+ u32 class = pdev->class;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3:
+ case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI:
+ case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31:
+ pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
+ pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
+ class, pdev->class);
+ break;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
+ quirk_synopsys_haps);
+
+/*
* Let's make the southbridge information explicit instead of having to
* worry about people probing the ACPI areas, for example.. (Yes, it
* happens, and if you read the wrong ACPI register it will put the machine
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 54a8b30dda38..6c5536d3d42a 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -13,7 +13,7 @@
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
-
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
@@ -25,6 +25,11 @@ static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
+static bool use_dma_mrpc = 1;
+module_param(use_dma_mrpc, bool, 0644);
+MODULE_PARM_DESC(use_dma_mrpc,
+ "Enable the use of the DMA MRPC feature");
+
static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);
@@ -113,6 +118,19 @@ static void stuser_set_state(struct switchtec_user *stuser,
static void mrpc_complete_cmd(struct switchtec_dev *stdev);
+static void flush_wc_buf(struct switchtec_dev *stdev)
+{
+ struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
+
+ /*
+ * The odb (outbound doorbell) register is handled by low-latency
+ * hardware and reading it has no side effects, so a read of it is used
+ * to flush the write-combining buffer.
+ */
+ mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
+ SWITCHTEC_NTB_REG_DBMSG_OFFSET;
+ ioread32(&mmio_dbmsg->odb);
+}
+
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
/* requires the mrpc_mutex to already be held when called */
@@ -128,16 +146,18 @@ static void mrpc_cmd_submit(struct switchtec_dev *stdev)
stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
list);
+ if (stdev->dma_mrpc) {
+ stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
+ memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
+ }
+
stuser_set_state(stuser, MRPC_RUNNING);
stdev->mrpc_busy = 1;
memcpy_toio(&stdev->mmio_mrpc->input_data,
stuser->data, stuser->data_len);
+ flush_wc_buf(stdev);
iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);
- stuser->status = ioread32(&stdev->mmio_mrpc->status);
- if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
- mrpc_complete_cmd(stdev);
-
schedule_delayed_work(&stdev->mrpc_timeout,
msecs_to_jiffies(500));
}
@@ -170,7 +190,11 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev)
stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
list);
- stuser->status = ioread32(&stdev->mmio_mrpc->status);
+ if (stdev->dma_mrpc)
+ stuser->status = stdev->dma_mrpc->status;
+ else
+ stuser->status = ioread32(&stdev->mmio_mrpc->status);
+
if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
return;
@@ -180,13 +204,19 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev)
if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
goto out;
- stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
+ if (stdev->dma_mrpc)
+ stuser->return_code = stdev->dma_mrpc->rtn_code;
+ else
+ stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
if (stuser->return_code != 0)
goto out;
- memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
- stuser->read_len);
-
+ if (stdev->dma_mrpc)
+ memcpy(stuser->data, &stdev->dma_mrpc->data,
+ stuser->read_len);
+ else
+ memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
+ stuser->read_len);
out:
complete_all(&stuser->comp);
list_del_init(&stuser->list);
@@ -221,7 +251,10 @@ static void mrpc_timeout_work(struct work_struct *work)
mutex_lock(&stdev->mrpc_mutex);
- status = ioread32(&stdev->mmio_mrpc->status);
+ if (stdev->dma_mrpc)
+ status = stdev->dma_mrpc->status;
+ else
+ status = ioread32(&stdev->mmio_mrpc->status);
if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
schedule_delayed_work(&stdev->mrpc_timeout,
msecs_to_jiffies(500));
@@ -229,7 +262,6 @@ static void mrpc_timeout_work(struct work_struct *work)
}
mrpc_complete_cmd(stdev);
-
out:
mutex_unlock(&stdev->mrpc_mutex);
}
@@ -800,6 +832,7 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev,
{
int ret;
int nr_idxs;
+ unsigned int event_flags;
struct switchtec_ioctl_event_ctl ctl;
if (copy_from_user(&ctl, uctl, sizeof(ctl)))
@@ -821,7 +854,9 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev,
else
return -EINVAL;
+ event_flags = ctl.flags;
for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
+ ctl.flags = event_flags;
ret = event_ctl(stdev, &ctl);
if (ret < 0)
return ret;
@@ -1017,10 +1052,24 @@ static void enable_link_state_events(struct switchtec_dev *stdev)
}
}
+static void enable_dma_mrpc(struct switchtec_dev *stdev)
+{
+ writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
+ flush_wc_buf(stdev);
+ iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
+}
+
static void stdev_release(struct device *dev)
{
struct switchtec_dev *stdev = to_stdev(dev);
+ if (stdev->dma_mrpc) {
+ iowrite32(0, &stdev->mmio_mrpc->dma_en);
+ flush_wc_buf(stdev);
+ writeq(0, &stdev->mmio_mrpc->dma_addr);
+ dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
+ stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
+ }
kfree(stdev);
}
@@ -1176,10 +1225,27 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
return ret;
}
+
+static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
+{
+ struct switchtec_dev *stdev = dev;
+ irqreturn_t ret = IRQ_NONE;
+
+ iowrite32(SWITCHTEC_EVENT_CLEAR |
+ SWITCHTEC_EVENT_EN_IRQ,
+ &stdev->mmio_part_cfg->mrpc_comp_hdr);
+ schedule_work(&stdev->mrpc_work);
+
+ ret = IRQ_HANDLED;
+ return ret;
+}
+
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
int nvecs;
int event_irq;
+ int dma_mrpc_irq;
+ int rc;
nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
@@ -1194,9 +1260,29 @@ static int switchtec_init_isr(struct switchtec_dev *stdev)
if (event_irq < 0)
return event_irq;
- return devm_request_irq(&stdev->pdev->dev, event_irq,
+ rc = devm_request_irq(&stdev->pdev->dev, event_irq,
switchtec_event_isr, 0,
KBUILD_MODNAME, stdev);
+
+ if (rc)
+ return rc;
+
+ if (!stdev->dma_mrpc)
+ return rc;
+
+ dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
+ if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
+ return -EFAULT;
+
+ dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
+ if (dma_mrpc_irq < 0)
+ return dma_mrpc_irq;
+
+ rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
+ switchtec_dma_mrpc_isr, 0,
+ KBUILD_MODNAME, stdev);
+
+ return rc;
}
static void init_pff(struct switchtec_dev *stdev)
@@ -1232,19 +1318,38 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
struct pci_dev *pdev)
{
int rc;
+ void __iomem *map;
+ unsigned long res_start, res_len;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
- rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
return rc;
pci_set_master(pdev);
- stdev->mmio = pcim_iomap_table(pdev)[0];
- stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
+ res_start = pci_resource_start(pdev, 0);
+ res_len = pci_resource_len(pdev, 0);
+
+ if (!devm_request_mem_region(&pdev->dev, res_start,
+ res_len, KBUILD_MODNAME))
+ return -EBUSY;
+
+ stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
+ SWITCHTEC_GAS_TOP_CFG_OFFSET);
+ if (!stdev->mmio_mrpc)
+ return -ENOMEM;
+
+ map = devm_ioremap(&pdev->dev,
+ res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
+ res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
+ if (!map)
+ return -ENOMEM;
+
+ stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
@@ -1262,6 +1367,19 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
pci_set_drvdata(pdev, stdev);
+ if (!use_dma_mrpc)
+ return 0;
+
+ if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
+ return 0;
+
+ stdev->dma_mrpc = dma_zalloc_coherent(&stdev->pdev->dev,
+ sizeof(*stdev->dma_mrpc),
+ &stdev->dma_mrpc_dma_addr,
+ GFP_KERNEL);
+ if (stdev->dma_mrpc == NULL)
+ return -ENOMEM;
+
return 0;
}
@@ -1293,6 +1411,9 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
&stdev->mmio_part_cfg->mrpc_comp_hdr);
enable_link_state_events(stdev);
+ if (stdev->dma_mrpc)
+ enable_dma_mrpc(stdev);
+
rc = cdev_device_add(&stdev->cdev, &stdev->dev);
if (rc)
goto err_devadd;
@@ -1318,7 +1439,6 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
cdev_device_del(&stdev->cdev, &stdev->dev);
ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
dev_info(&stdev->dev, "unregistered.\n");
-
stdev_kill(stdev);
put_device(&stdev->dev);
}