author     David S. Miller <davem@davemloft.net>  2016-12-03 15:46:51 -0500
committer  David S. Miller <davem@davemloft.net>  2016-12-03 15:46:51 -0500
commit     1e8c86a6bb4c0537b803cd70d857f49583a877a1
tree       187ea03e8fdb4fdc7def8bc25212479d0ed9c3dc
parent     397c5ad153f0ea62023accb67b3d2b07e5039948
parent     0296fe4da6505bb9eb36f85f157b0219ef1e2e04
Merge branch 'xgene-jumbo-and-pause-frame'
Iyappan Subramanian says:
====================
drivers: net: xgene: Add Jumbo and Pause frame support
This patch set adds:
1. Jumbo frame support
2. Pause frame based flow control
and fixes RSS for non-TCP/UDP packets.
====================
Signed-off-by: Iyappan Subramanian <isubramanian@apm.com>
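
The pause settings added by this series surface through the generic ethtool
pause interface. As a rough userspace illustration (not part of the series;
the interface name "eth0" is a placeholder), they can be queried with the
standard SIOCETHTOOL ioctl:

    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct ethtool_pauseparam pp = { .cmd = ETHTOOL_GPAUSEPARAM };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder name */
        ifr.ifr_data = (void *)&pp;

        if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
            perror("SIOCETHTOOL");
            return 1;
        }

        printf("autoneg %u rx_pause %u tx_pause %u\n",
               pp.autoneg, pp.rx_pause, pp.tx_pause);
        close(fd);
        return 0;
    }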
 drivers/net/ethernet/apm/xgene/xgene_enet_cle.c     | 110
 drivers/net/ethernet/apm/xgene/xgene_enet_cle.h     |   3
 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c |  70
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c      | 140
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h      |  27
 drivers/net/ethernet/apm/xgene/xgene_enet_main.c    | 336
 drivers/net/ethernet/apm/xgene/xgene_enet_main.h    |  30
 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c   |   1
 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c   | 146
 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c   | 121
 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h   |   9
 11 files changed, 895 insertions(+), 98 deletions(-)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
index 23d72af83d82..1dc6c20cd82b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
@@ -52,6 +52,7 @@ static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
 {
     buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
     buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
+             SET_VAL(CLE_NFPSEL, dbptr->nxtfpsel) |
              SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
 
     buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
@@ -346,11 +347,15 @@ static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
     for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
         idx = i % pdata->rxq_cnt;
         pool_id = pdata->rx_ring[idx]->buf_pool->id;
-        fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
+        fpsel = xgene_enet_get_fpsel(pool_id);
         dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
         nfpsel = 0;
-        idt_reg = 0;
+        if (pdata->rx_ring[idx]->page_pool) {
+            pool_id = pdata->rx_ring[idx]->page_pool->id;
+            nfpsel = xgene_enet_get_fpsel(pool_id);
+        }
 
+        idt_reg = 0;
         xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
         ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
                                 RSS_IDT, CLE_CMD_WR);
@@ -400,9 +405,9 @@ static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
 static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
 {
     struct xgene_enet_cle *enet_cle = &pdata->cle;
+    u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;
     struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
     struct xgene_cle_ptree_branch *br;
-    u32 def_qid, def_fpsel, pool_id;
     struct xgene_cle_ptree *ptree;
     struct xgene_cle_ptree_kn kn;
     int ret;
@@ -480,11 +485,11 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
             },
             {
                 .valid = 0,
-                .next_packet_pointer = 260,
+                .next_packet_pointer = 26,
                 .jump_bw = JMP_FW,
                 .jump_rel = JMP_ABS,
                 .operation = EQT,
-                .next_node = LAST_NODE,
+                .next_node = RSS_IPV4_OTHERS_NODE,
                 .next_branch = 0,
                 .data = 0x0,
                 .mask = 0xffff
@@ -662,6 +667,92 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
             }
         },
         {
+            /* RSS_IPV4_OTHERS_NODE */
+            .node_type = EWDN,
+            .last_node = 0,
+            .hdr_len_store = 1,
+            .hdr_extn = NO_BYTE,
+            .byte_store = NO_BYTE,
+            .search_byte_store = BOTH_BYTES,
+            .result_pointer = DB_RES_DROP,
+            .num_branches = 6,
+            .branch = {
+                {
+                    /* SRC IPV4 B01 */
+                    .valid = 0,
+                    .next_packet_pointer = 28,
+                    .jump_bw = JMP_FW,
+                    .jump_rel = JMP_ABS,
+                    .operation = EQT,
+                    .next_node = RSS_IPV4_OTHERS_NODE,
+                    .next_branch = 1,
+                    .data = 0x0,
+                    .mask = 0xffff
+                },
+                {
+                    /* SRC IPV4 B23 */
+                    .valid = 0,
+                    .next_packet_pointer = 30,
+                    .jump_bw = JMP_FW,
+                    .jump_rel = JMP_ABS,
+                    .operation = EQT,
+                    .next_node = RSS_IPV4_OTHERS_NODE,
+                    .next_branch = 2,
+                    .data = 0x0,
+                    .mask = 0xffff
+                },
+                {
+                    /* DST IPV4 B01 */
+                    .valid = 0,
+                    .next_packet_pointer = 32,
+                    .jump_bw = JMP_FW,
+                    .jump_rel = JMP_ABS,
+                    .operation = EQT,
+                    .next_node = RSS_IPV4_OTHERS_NODE,
+                    .next_branch = 3,
+                    .data = 0x0,
+                    .mask = 0xffff
+                },
+                {
+                    /* DST IPV4 B23 */
+                    .valid = 0,
+                    .next_packet_pointer = 34,
+                    .jump_bw = JMP_FW,
+                    .jump_rel = JMP_ABS,
+                    .operation = EQT,
+                    .next_node = RSS_IPV4_OTHERS_NODE,
+                    .next_branch = 4,
+                    .data = 0x0,
+                    .mask = 0xffff
+                },
+                {
+                    /* TCP SRC Port */
+                    .valid = 0,
+                    .next_packet_pointer = 36,
+                    .jump_bw = JMP_FW,
+                    .jump_rel = JMP_ABS,
+                    .operation = EQT,
+                    .next_node = RSS_IPV4_OTHERS_NODE,
+                    .next_branch = 5,
+                    .data = 0x0,
+                    .mask = 0xffff
+                },
+                {
+                    /* TCP DST Port */
+                    .valid = 0,
+                    .next_packet_pointer = 260,
+                    .jump_bw = JMP_FW,
+                    .jump_rel = JMP_ABS,
+                    .operation = EQT,
+                    .next_node = LAST_NODE,
+                    .next_branch = 0,
+                    .data = 0x0,
+                    .mask = 0xffff
+                }
+            }
+        },
+
+        {
             /* LAST NODE */
             .node_type = EWDN,
             .last_node = 1,
@@ -706,14 +797,21 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
     def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
     pool_id = pdata->rx_ring[0]->buf_pool->id;
-    def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
+    def_fpsel = xgene_enet_get_fpsel(pool_id);
+    def_nxtfpsel = 0;
+    if (pdata->rx_ring[0]->page_pool) {
+        pool_id = pdata->rx_ring[0]->page_pool->id;
+        def_nxtfpsel = xgene_enet_get_fpsel(pool_id);
+    }
 
     memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
     dbptr[DB_RES_ACCEPT].fpsel = def_fpsel;
+    dbptr[DB_RES_ACCEPT].nxtfpsel = def_nxtfpsel;
     dbptr[DB_RES_ACCEPT].dstqid = def_qid;
     dbptr[DB_RES_ACCEPT].cle_priority = 1;
 
     dbptr[DB_RES_DEF].fpsel = def_fpsel;
+    dbptr[DB_RES_DEF].nxtfpsel = def_nxtfpsel;
     dbptr[DB_RES_DEF].dstqid = def_qid;
     dbptr[DB_RES_DEF].cle_priority = 7;
     xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
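
The new RSS_IPV4_OTHERS_NODE walks six 2-byte fields (classifier packet
pointers 26 through 36) so that non-TCP/UDP IPv4 frames still feed a
4-tuple into the RSS hash. For an untagged Ethernet + IPv4 frame those
pointers line up with the source address, destination address, and the
first four L4 bytes. A toy fold of those bytes (illustration only; the
hash the hardware computes is its own):

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t toy_rss_fold(const uint8_t *frame, size_t len)
    {
        uint32_t acc = 0;
        size_t off;

        if (len < 38)   /* need bytes 26..37 */
            return 0;

        /* src IP (26..29), dst IP (30..33), first 4 L4 bytes (34..37) */
        for (off = 26; off <= 36; off += 2)
            acc = (acc << 5) - acc + ((frame[off] << 8) | frame[off + 1]);

        return acc;
    }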
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
index 9ac9f8e145ec..290d5d159ec2 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
@@ -91,6 +91,8 @@
 #define CLE_DSTQIDH_LEN        5
 #define CLE_FPSEL_POS          21
 #define CLE_FPSEL_LEN          4
+#define CLE_NFPSEL_POS         17
+#define CLE_NFPSEL_LEN         4
 #define CLE_PRIORITY_POS       5
 #define CLE_PRIORITY_LEN       3
 
@@ -104,6 +106,7 @@ enum xgene_cle_ptree_nodes {
     PKT_PROT_NODE,
     RSS_IPV4_TCP_NODE,
     RSS_IPV4_UDP_NODE,
+    RSS_IPV4_OTHERS_NODE,
     LAST_NODE,
     MAX_NODES
 };
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index d372d4235c81..28fdedc30b74 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -163,6 +163,74 @@ static void xgene_get_ethtool_stats(struct net_device *ndev,
         *data++ = *(u64 *)(pdata + gstrings_stats[i].offset);
 }
 
+static void xgene_get_pauseparam(struct net_device *ndev,
+                                 struct ethtool_pauseparam *pp)
+{
+    struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+
+    pp->autoneg = pdata->pause_autoneg;
+    pp->tx_pause = pdata->tx_pause;
+    pp->rx_pause = pdata->rx_pause;
+}
+
+static int xgene_set_pauseparam(struct net_device *ndev,
+                                struct ethtool_pauseparam *pp)
+{
+    struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+    struct phy_device *phydev = ndev->phydev;
+    u32 oldadv, newadv;
+
+    if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
+        pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+        if (!phydev)
+            return -EINVAL;
+
+        if (!(phydev->supported & SUPPORTED_Pause) ||
+            (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+             pp->rx_pause != pp->tx_pause))
+            return -EINVAL;
+
+        pdata->pause_autoneg = pp->autoneg;
+        pdata->tx_pause = pp->tx_pause;
+        pdata->rx_pause = pp->rx_pause;
+
+        oldadv = phydev->advertising;
+        newadv = oldadv & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+        if (pp->rx_pause)
+            newadv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+        if (pp->tx_pause)
+            newadv ^= ADVERTISED_Asym_Pause;
+
+        if (oldadv ^ newadv) {
+            phydev->advertising = newadv;
+
+            if (phydev->autoneg)
+                return phy_start_aneg(phydev);
+
+            if (!pp->autoneg) {
+                pdata->mac_ops->flowctl_tx(pdata,
+                                           pdata->tx_pause);
+                pdata->mac_ops->flowctl_rx(pdata,
+                                           pdata->rx_pause);
+            }
+        }
+
+    } else {
+        if (pp->autoneg)
+            return -EINVAL;
+
+        pdata->tx_pause = pp->tx_pause;
+        pdata->rx_pause = pp->rx_pause;
+
+        pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
+        pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
+    }
+
+    return 0;
+}
+
 static const struct ethtool_ops xgene_ethtool_ops = {
     .get_drvinfo = xgene_get_drvinfo,
     .get_link = ethtool_op_get_link,
@@ -171,6 +239,8 @@ static const struct ethtool_ops xgene_ethtool_ops = {
     .get_ethtool_stats = xgene_get_ethtool_stats,
     .get_link_ksettings = xgene_get_link_ksettings,
     .set_link_ksettings = xgene_set_link_ksettings,
+    .get_pauseparam = xgene_get_pauseparam,
+    .set_pauseparam = xgene_set_pauseparam
 };
 
 void xgene_enet_set_ethtool_ops(struct net_device *ndev)
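
The advertisement update in xgene_set_pauseparam() follows the usual
802.3x mapping. A standalone restatement (the bit names here are
stand-ins for ADVERTISED_Pause/ADVERTISED_Asym_Pause, not driver code):

    /* resulting table:
     *   rx=0 tx=0 -> (none)
     *   rx=0 tx=1 -> Asym_Pause
     *   rx=1 tx=0 -> Pause | Asym_Pause
     *   rx=1 tx=1 -> Pause
     */
    #include <stdbool.h>
    #include <stdint.h>

    #define ADV_PAUSE       (1u << 0)
    #define ADV_ASYM_PAUSE  (1u << 1)

    static uint32_t pause_to_adv(bool rx_pause, bool tx_pause)
    {
        uint32_t adv = 0;

        if (rx_pause)
            adv |= ADV_PAUSE | ADV_ASYM_PAUSE;
        if (tx_pause)
            adv ^= ADV_ASYM_PAUSE;  /* toggle asymmetry */

        return adv;
    }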
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 5390ae89136c..06e681697c17 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -504,6 +504,56 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
     xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
 }
 
+static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
+{
+    xgene_enet_wr_mcx_mac(pdata, MAX_FRAME_LEN_ADDR, size);
+}
+
+static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
+                                       bool enable)
+{
+    u32 data;
+
+    xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data);
+
+    if (enable)
+        data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
+    else
+        data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
+
+    xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data);
+}
+
+static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
+{
+    u32 data;
+
+    xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+
+    if (enable)
+        data |= TX_FLOW_EN;
+    else
+        data &= ~TX_FLOW_EN;
+
+    xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
+
+    pdata->mac_ops->enable_tx_pause(pdata, enable);
+}
+
+static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
+{
+    u32 data;
+
+    xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+
+    if (enable)
+        data |= RX_FLOW_EN;
+    else
+        data &= ~RX_FLOW_EN;
+
+    xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
+}
+
 static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
 {
     u32 value;
@@ -527,6 +577,17 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
     /* Rtype should be copied from FP */
     xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
 
+    /* Configure HW pause frame generation */
+    xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value);
+    value = (DEF_QUANTA << 16) | (value & 0xFFFF);
+    xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value);
+
+    xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES);
+    xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES);
+
+    xgene_gmac_flowctl_tx(pdata, pdata->tx_pause);
+    xgene_gmac_flowctl_rx(pdata, pdata->rx_pause);
+
     /* Rx-Tx traffic resume */
     xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
 
@@ -550,12 +611,14 @@ static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
 }
 
 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
-                                  u32 dst_ring_num, u16 bufpool_id)
+                                  u32 dst_ring_num, u16 bufpool_id,
+                                  u16 nxtbufpool_id)
 {
     u32 cb;
-    u32 fpsel;
+    u32 fpsel, nxtfpsel;
 
-    fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
+    fpsel = xgene_enet_get_fpsel(bufpool_id);
+    nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
 
     xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
     cb |= CFG_CLE_BYPASS_EN0;
@@ -565,6 +628,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
     xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
     CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
     CFG_CLE_FPSEL0_SET(&cb, fpsel);
+    CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
     xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
 }
 
@@ -652,16 +716,14 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
                              struct xgene_enet_desc_ring *ring)
 {
-    u32 addr, val, data;
-
-    val = xgene_enet_ring_bufnum(ring->id);
+    u32 addr, data;
 
     if (xgene_enet_is_bufpool(ring->id)) {
         addr = ENET_CFGSSQMIFPRESET_ADDR;
-        data = BIT(val - 0x20);
+        data = BIT(xgene_enet_get_fpsel(ring->id));
     } else {
         addr = ENET_CFGSSQMIWQRESET_ADDR;
-        data = BIT(val);
+        data = BIT(xgene_enet_ring_bufnum(ring->id));
     }
 
     xgene_enet_wr_ring_if(pdata, addr, data);
@@ -671,24 +733,24 @@ static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
 {
     struct device *dev = &pdata->pdev->dev;
     struct xgene_enet_desc_ring *ring;
-    u32 pb, val;
+    u32 pb;
     int i;
 
     pb = 0;
     for (i = 0; i < pdata->rxq_cnt; i++) {
         ring = pdata->rx_ring[i]->buf_pool;
+        pb |= BIT(xgene_enet_get_fpsel(ring->id));
+        ring = pdata->rx_ring[i]->page_pool;
+        if (ring)
+            pb |= BIT(xgene_enet_get_fpsel(ring->id));
 
-        val = xgene_enet_ring_bufnum(ring->id);
-        pb |= BIT(val - 0x20);
     }
     xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
 
     pb = 0;
     for (i = 0; i < pdata->txq_cnt; i++) {
         ring = pdata->tx_ring[i];
-
-        val = xgene_enet_ring_bufnum(ring->id);
-        pb |= BIT(val);
+        pb |= BIT(xgene_enet_ring_bufnum(ring->id));
     }
     xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
 
@@ -698,6 +760,48 @@ static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
     }
 }
 
+static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev)
+{
+    struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+    struct phy_device *phydev = ndev->phydev;
+    u16 lcladv, rmtadv = 0;
+    u32 rx_pause, tx_pause;
+    u8 flowctl = 0;
+
+    if (!phydev->duplex || !pdata->pause_autoneg)
+        return 0;
+
+    if (pdata->tx_pause)
+        flowctl |= FLOW_CTRL_TX;
+
+    if (pdata->rx_pause)
+        flowctl |= FLOW_CTRL_RX;
+
+    lcladv = mii_advertise_flowctrl(flowctl);
+
+    if (phydev->pause)
+        rmtadv = LPA_PAUSE_CAP;
+
+    if (phydev->asym_pause)
+        rmtadv |= LPA_PAUSE_ASYM;
+
+    flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+    tx_pause = !!(flowctl & FLOW_CTRL_TX);
+    rx_pause = !!(flowctl & FLOW_CTRL_RX);
+
+    if (tx_pause != pdata->tx_pause) {
+        pdata->tx_pause = tx_pause;
+        pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
+    }
+
+    if (rx_pause != pdata->rx_pause) {
+        pdata->rx_pause = rx_pause;
+        pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
+    }
+
+    return 0;
+}
+
 static void xgene_enet_adjust_link(struct net_device *ndev)
 {
     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
@@ -712,6 +816,8 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
             mac_ops->tx_enable(pdata);
             phy_print_status(phydev);
         }
+
+        xgene_enet_flowctrl_cfg(ndev);
     } else {
         mac_ops->rx_disable(pdata);
         mac_ops->tx_disable(pdata);
@@ -785,6 +891,8 @@ int xgene_enet_phy_connect(struct net_device *ndev)
     phy_dev->supported &= ~SUPPORTED_10baseT_Half &
                           ~SUPPORTED_100baseT_Half &
                           ~SUPPORTED_1000baseT_Half;
+    phy_dev->supported |= SUPPORTED_Pause |
+                          SUPPORTED_Asym_Pause;
     phy_dev->advertising = phy_dev->supported;
 
     return 0;
@@ -902,6 +1010,10 @@ const struct xgene_mac_ops xgene_gmac_ops = {
     .tx_disable = xgene_gmac_tx_disable,
     .set_speed = xgene_gmac_set_speed,
     .set_mac_addr = xgene_gmac_set_mac_addr,
+    .set_framesize = xgene_enet_set_frame_size,
+    .enable_tx_pause = xgene_gmac_enable_tx_pause,
+    .flowctl_tx = xgene_gmac_flowctl_tx,
+    .flowctl_rx = xgene_gmac_flowctl_rx,
 };
 
 const struct xgene_port_ops xgene_gport_ops = {
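
xgene_enet_flowctrl_cfg() leans on the kernel's mii_resolve_flowctrl_fdx()
(include/linux/mii.h) to turn local and link-partner advertisements into
effective tx/rx pause. For reference, its resolution rule restated
standalone (stand-in constants; illustration only):

    #include <stdint.h>

    #define PAUSE_CAP   (1u << 10)  /* stand-in for ADVERTISE_PAUSE_CAP */
    #define PAUSE_ASYM  (1u << 11)  /* stand-in for ADVERTISE_PAUSE_ASYM */
    #define FC_TX       (1u << 0)
    #define FC_RX       (1u << 1)

    static uint8_t resolve_flowctrl_fdx(uint16_t lcladv, uint16_t rmtadv)
    {
        /* symmetric pause if both sides advertise it ... */
        if (lcladv & rmtadv & PAUSE_CAP)
            return FC_TX | FC_RX;

        /* ... else asymmetric pause toward whichever side asked for it */
        if (lcladv & rmtadv & PAUSE_ASYM) {
            if (lcladv & PAUSE_CAP)
                return FC_RX;
            if (rmtadv & PAUSE_CAP)
                return FC_TX;
        }

        return 0;
    }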
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 06e598c8bc16..5f83037bb96b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -165,10 +165,23 @@ enum xgene_enet_rm {
 #define CFG_CLE_IP_PROTOCOL0_SET(dst, val)  xgene_set_bits(dst, val, 16, 2)
 #define CFG_CLE_DSTQID0_SET(dst, val)       xgene_set_bits(dst, val, 0, 12)
 #define CFG_CLE_FPSEL0_SET(dst, val)        xgene_set_bits(dst, val, 16, 4)
+#define CFG_CLE_NXTFPSEL0_SET(dst, val)     xgene_set_bits(dst, val, 20, 4)
 #define CFG_MACMODE_SET(dst, val)           xgene_set_bits(dst, val, 18, 2)
 #define CFG_WAITASYNCRD_SET(dst, val)       xgene_set_bits(dst, val, 0, 16)
-#define CFG_CLE_DSTQID0(val)        (val & GENMASK(11, 0))
-#define CFG_CLE_FPSEL0(val)         ((val << 16) & GENMASK(19, 16))
+#define CFG_CLE_DSTQID0(val)        ((val) & GENMASK(11, 0))
+#define CFG_CLE_FPSEL0(val)         (((val) << 16) & GENMASK(19, 16))
+#define CSR_ECM_CFG_0_ADDR          0x0220
+#define CSR_ECM_CFG_1_ADDR          0x0224
+#define CSR_MULTI_DPF0_ADDR         0x0230
+#define RXBUF_PAUSE_THRESH          0x0534
+#define RXBUF_PAUSE_OFF_THRESH      0x0540
+#define DEF_PAUSE_THRES             0x7d
+#define DEF_PAUSE_OFF_THRES         0x6d
+#define DEF_QUANTA                  0x8000
+#define NORM_PAUSE_OPCODE           0x0001
+#define PAUSE_XON_EN                BIT(30)
+#define MULTI_DPF_AUTOCTRL          BIT(28)
+#define CFG_CLE_NXTFPSEL0(val)      (((val) << 20) & GENMASK(23, 20))
 #define ICM_CONFIG0_REG_0_ADDR      0x0400
 #define ICM_CONFIG2_REG_0_ADDR      0x0410
 #define RX_DV_GATE_REG_0_ADDR       0x05fc
@@ -196,6 +209,8 @@ enum xgene_enet_rm {
 #define SOFT_RESET1                 BIT(31)
 #define TX_EN                       BIT(0)
 #define RX_EN                       BIT(2)
+#define TX_FLOW_EN                  BIT(4)
+#define RX_FLOW_EN                  BIT(5)
 #define ENET_LHD_MODE               BIT(25)
 #define ENET_GHD_MODE               BIT(26)
 #define FULL_DUPLEX2                BIT(0)
@@ -346,6 +361,14 @@ static inline bool xgene_enet_is_bufpool(u16 id)
     return ((id & RING_BUFNUM_MASK) >= 0x20) ? true : false;
 }
 
+static inline u8 xgene_enet_get_fpsel(u16 id)
+{
+    if (xgene_enet_is_bufpool(id))
+        return xgene_enet_ring_bufnum(id) - RING_BUFNUM_BUFPOOL;
+
+    return 0;
+}
+
 static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
 {
     bool is_bufpool = xgene_enet_is_bufpool(id);
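
The new xgene_enet_get_fpsel() helper replaces the open-coded
"bufnum - 0x20" scattered through the driver: buffer-pool rings occupy
buffer numbers 0x20 and up, and the free-pool select index is the offset
above that base. Sketch of the same arithmetic, assuming the mask and
base constants mirror RING_BUFNUM_MASK/RING_BUFNUM_BUFPOOL:

    #include <stdint.h>

    #define BUFNUM_MASK     0x3f
    #define BUFNUM_BUFPOOL  0x20

    static uint8_t get_fpsel(uint16_t ring_id)
    {
        uint8_t bufnum = ring_id & BUFNUM_MASK;

        /* buffer pools start at bufnum 0x20; fpsel is the offset */
        return (bufnum >= BUFNUM_BUFPOOL) ? (uint8_t)(bufnum - BUFNUM_BUFPOOL) : 0;
    }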
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 1352b5245fcc..6c7eea8b36af 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -37,6 +37,9 @@ static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
     struct xgene_enet_raw_desc16 *raw_desc;
     int i;
 
+    if (!buf_pool)
+        return;
+
     for (i = 0; i < buf_pool->slots; i++) {
         raw_desc = &buf_pool->raw_desc16[i];
 
@@ -47,6 +50,86 @@ static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
     }
 }
 
+static u16 xgene_enet_get_data_len(u64 bufdatalen)
+{
+    u16 hw_len, mask;
+
+    hw_len = GET_VAL(BUFDATALEN, bufdatalen);
+
+    if (unlikely(hw_len == 0x7800)) {
+        return 0;
+    } else if (!(hw_len & BIT(14))) {
+        mask = GENMASK(13, 0);
+        return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
+    } else if (!(hw_len & GENMASK(13, 12))) {
+        mask = GENMASK(11, 0);
+        return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
+    } else {
+        mask = GENMASK(11, 0);
+        return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
+    }
+}
+
+static u16 xgene_enet_set_data_len(u32 size)
+{
+    u16 hw_len;
+
+    hw_len = (size == SIZE_4K) ? BIT(14) : 0;
+
+    return hw_len;
+}
+
+static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
+                                      u32 nbuf)
+{
+    struct xgene_enet_raw_desc16 *raw_desc;
+    struct xgene_enet_pdata *pdata;
+    struct net_device *ndev;
+    dma_addr_t dma_addr;
+    struct device *dev;
+    struct page *page;
+    u32 slots, tail;
+    u16 hw_len;
+    int i;
+
+    if (unlikely(!buf_pool))
+        return 0;
+
+    ndev = buf_pool->ndev;
+    pdata = netdev_priv(ndev);
+    dev = ndev_to_dev(ndev);
+    slots = buf_pool->slots - 1;
+    tail = buf_pool->tail;
+
+    for (i = 0; i < nbuf; i++) {
+        raw_desc = &buf_pool->raw_desc16[tail];
+
+        page = dev_alloc_page();
+        if (unlikely(!page))
+            return -ENOMEM;
+
+        dma_addr = dma_map_page(dev, page, 0,
+                                PAGE_SIZE, DMA_FROM_DEVICE);
+        if (unlikely(dma_mapping_error(dev, dma_addr))) {
+            put_page(page);
+            return -ENOMEM;
+        }
+
+        hw_len = xgene_enet_set_data_len(PAGE_SIZE);
+        raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+                                   SET_VAL(BUFDATALEN, hw_len) |
+                                   SET_BIT(COHERENT));
+
+        buf_pool->frag_page[tail] = page;
+        tail = (tail + 1) & slots;
+    }
+
+    pdata->ring_ops->wr_cmd(buf_pool, nbuf);
+    buf_pool->tail = tail;
+
+    return 0;
+}
+
 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
                                      u32 nbuf)
 {
@@ -64,8 +147,9 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
     ndev = buf_pool->ndev;
     dev = ndev_to_dev(buf_pool->ndev);
     pdata = netdev_priv(ndev);
+
     bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
-    len = XGENE_ENET_MAX_MTU;
+    len = XGENE_ENET_STD_MTU;
 
     for (i = 0; i < nbuf; i++) {
         raw_desc = &buf_pool->raw_desc16[tail];
@@ -122,6 +206,25 @@ static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
     }
 }
 
+static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
+{
+    struct device *dev = ndev_to_dev(buf_pool->ndev);
+    dma_addr_t dma_addr;
+    struct page *page;
+    int i;
+
+    /* Free up the buffers held by hardware */
+    for (i = 0; i < buf_pool->slots; i++) {
+        page = buf_pool->frag_page[i];
+        if (page) {
+            dma_addr = buf_pool->frag_dma_addr[i];
+            dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+                           DMA_FROM_DEVICE);
+            put_page(page);
+        }
+    }
+}
+
 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
 {
     struct xgene_enet_desc_ring *rx_ring = data;
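
The buffer-length code in raw_desc->m1 packs the pool size class and the
byte count into one 15-bit field; 0x7800 marks an empty buffer, and an
all-zero length means "full buffer" for the given class. A standalone
restatement of the decode above, with a small self-check (illustration
only):

    #include <assert.h>
    #include <stdint.h>

    #define SZ_2K   2048
    #define SZ_4K   4096
    #define SZ_16K  16384

    static uint16_t decode_hw_len(uint16_t hw_len)
    {
        if (hw_len == 0x7800)           /* empty-buffer marker */
            return 0;
        if (!(hw_len & (1u << 14)))     /* 16K-class encoding */
            return (hw_len & 0x3fff) ? (hw_len & 0x3fff) : SZ_16K;
        if (!(hw_len & (3u << 12)))     /* 4K-class encoding */
            return (hw_len & 0xfff) ? (hw_len & 0xfff) : SZ_4K;
        return (hw_len & 0xfff) ? (hw_len & 0xfff) : SZ_2K; /* 2K class */
    }

    int main(void)
    {
        /* a full 4K page is posted as BIT(14), i.e. zero length bits */
        assert(decode_hw_len(1u << 14) == SZ_4K);
        assert(decode_hw_len(0x7800) == 0);
        return 0;
    }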
@@ -515,23 +618,67 @@ static void xgene_enet_skip_csum(struct sk_buff *skb)
     }
 }
 
+static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
+                                     struct xgene_enet_raw_desc *raw_desc,
+                                     struct xgene_enet_raw_desc *exp_desc)
+{
+    __le64 *desc = (void *)exp_desc;
+    dma_addr_t dma_addr;
+    struct device *dev;
+    struct page *page;
+    u16 slots, head;
+    u32 frag_size;
+    int i;
+
+    if (!buf_pool || !raw_desc || !exp_desc ||
+        (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
+        return;
+
+    dev = ndev_to_dev(buf_pool->ndev);
+    slots = buf_pool->slots - 1;
+    head = buf_pool->head;
+
+    for (i = 0; i < 4; i++) {
+        frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
+        if (!frag_size)
+            break;
+
+        dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
+        dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+        page = buf_pool->frag_page[head];
+        put_page(page);
+
+        buf_pool->frag_page[head] = NULL;
+        head = (head + 1) & slots;
+    }
+    buf_pool->head = head;
+}
+
 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
-                               struct xgene_enet_raw_desc *raw_desc)
+                               struct xgene_enet_raw_desc *raw_desc,
+                               struct xgene_enet_raw_desc *exp_desc)
 {
+    struct xgene_enet_desc_ring *buf_pool, *page_pool;
+    u32 datalen, frag_size, skb_index;
     struct net_device *ndev;
-    struct device *dev;
-    struct xgene_enet_desc_ring *buf_pool;
-    u32 datalen, skb_index;
+    dma_addr_t dma_addr;
     struct sk_buff *skb;
+    struct device *dev;
+    struct page *page;
+    u16 slots, head;
+    int i, ret = 0;
+    __le64 *desc;
     u8 status;
-    int ret = 0;
+    bool nv;
 
     ndev = rx_ring->ndev;
     dev = ndev_to_dev(rx_ring->ndev);
     buf_pool = rx_ring->buf_pool;
+    page_pool = rx_ring->page_pool;
 
     dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
-                     XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
+                     XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
     skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
     skb = buf_pool->rx_skb[skb_index];
     buf_pool->rx_skb[skb_index] = NULL;
@@ -541,6 +687,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
              GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
     if (unlikely(status > 2)) {
         dev_kfree_skb_any(skb);
+        xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
         xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
                                status);
         ret = -EIO;
@@ -548,11 +695,44 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
     }
 
     /* strip off CRC as HW isn't doing this */
-    datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
-    datalen = (datalen & DATALEN_MASK) - 4;
-    prefetch(skb->data - NET_IP_ALIGN);
+    datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+
+    nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
+    if (!nv)
+        datalen -= 4;
+
     skb_put(skb, datalen);
+    prefetch(skb->data - NET_IP_ALIGN);
+
+    if (!nv)
+        goto skip_jumbo;
+
+    slots = page_pool->slots - 1;
+    head = page_pool->head;
+    desc = (void *)exp_desc;
+
+    for (i = 0; i < 4; i++) {
+        frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
+        if (!frag_size)
+            break;
+
+        dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
+        dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+        page = page_pool->frag_page[head];
+        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
+                        frag_size, PAGE_SIZE);
+
+        datalen += frag_size;
+
+        page_pool->frag_page[head] = NULL;
+        head = (head + 1) & slots;
+    }
+
+    page_pool->head = head;
+    rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;
+
+skip_jumbo:
     skb_checksum_none_assert(skb);
     skb->protocol = eth_type_trans(skb, ndev);
     if (likely((ndev->features & NETIF_F_IP_CSUM) &&
@@ -563,7 +743,15 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
     rx_ring->rx_packets++;
     rx_ring->rx_bytes += datalen;
     napi_gro_receive(&rx_ring->napi, skb);
+
 out:
+    if (rx_ring->npagepool <= 0) {
+        ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
+        rx_ring->npagepool = NUM_NXTBUFPOOL;
+        if (ret)
+            return ret;
+    }
+
     if (--rx_ring->nbufpool == 0) {
         ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
         rx_ring->nbufpool = NUM_BUFPOOL;
@@ -611,7 +799,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
             desc_count++;
         }
         if (is_rx_desc(raw_desc)) {
-            ret = xgene_enet_rx_frame(ring, raw_desc);
+            ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
         } else {
             ret = xgene_enet_tx_completion(ring, raw_desc);
             is_completion = true;
@@ -854,7 +1042,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
 
 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
 {
-    struct xgene_enet_desc_ring *buf_pool;
+    struct xgene_enet_desc_ring *buf_pool, *page_pool;
     struct xgene_enet_desc_ring *ring;
     int i;
 
@@ -867,18 +1055,28 @@ static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
             xgene_enet_delete_ring(ring->cp_ring);
             pdata->tx_ring[i] = NULL;
         }
+
     }
 
     for (i = 0; i < pdata->rxq_cnt; i++) {
         ring = pdata->rx_ring[i];
         if (ring) {
+            page_pool = ring->page_pool;
+            if (page_pool) {
+                xgene_enet_delete_pagepool(page_pool);
+                xgene_enet_delete_ring(page_pool);
+                pdata->port_ops->clear(pdata, page_pool);
+            }
+
             buf_pool = ring->buf_pool;
             xgene_enet_delete_bufpool(buf_pool);
             xgene_enet_delete_ring(buf_pool);
             pdata->port_ops->clear(pdata, buf_pool);
+
             xgene_enet_delete_ring(ring);
             pdata->rx_ring[i] = NULL;
         }
+
     }
 }
@@ -931,8 +1129,10 @@ static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
 
 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
 {
+    struct xgene_enet_desc_ring *page_pool;
     struct device *dev = &pdata->pdev->dev;
     struct xgene_enet_desc_ring *ring;
+    void *p;
     int i;
 
     for (i = 0; i < pdata->txq_cnt; i++) {
@@ -940,10 +1140,13 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
         if (ring) {
             if (ring->cp_ring && ring->cp_ring->cp_skb)
                 devm_kfree(dev, ring->cp_ring->cp_skb);
+
             if (ring->cp_ring && pdata->cq_cnt)
                 xgene_enet_free_desc_ring(ring->cp_ring);
+
             xgene_enet_free_desc_ring(ring);
         }
+
     }
 
     for (i = 0; i < pdata->rxq_cnt; i++) {
@@ -952,8 +1155,21 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
             if (ring->buf_pool) {
                 if (ring->buf_pool->rx_skb)
                     devm_kfree(dev, ring->buf_pool->rx_skb);
+
                 xgene_enet_free_desc_ring(ring->buf_pool);
             }
+
+            page_pool = ring->page_pool;
+            if (page_pool) {
+                p = page_pool->frag_page;
+                if (p)
+                    devm_kfree(dev, p);
+
+                p = page_pool->frag_dma_addr;
+                if (p)
+                    devm_kfree(dev, p);
+            }
+
             xgene_enet_free_desc_ring(ring);
         }
     }
@@ -1071,19 +1287,20 @@ static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
 
 static int xgene_enet_create_desc_rings(struct net_device *ndev)
 {
-    struct xgene_enet_pdata *pdata = netdev_priv(ndev);
-    struct device *dev = ndev_to_dev(ndev);
     struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
+    struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+    struct xgene_enet_desc_ring *page_pool = NULL;
     struct xgene_enet_desc_ring *buf_pool = NULL;
-    enum xgene_ring_owner owner;
-    dma_addr_t dma_exp_bufs;
-    u8 cpu_bufnum;
+    struct device *dev = ndev_to_dev(ndev);
     u8 eth_bufnum = pdata->eth_bufnum;
     u8 bp_bufnum = pdata->bp_bufnum;
     u16 ring_num = pdata->ring_num;
+    enum xgene_ring_owner owner;
+    dma_addr_t dma_exp_bufs;
+    u16 ring_id, slots;
     __le64 *exp_bufs;
-    u16 ring_id;
     int i, ret, size;
+    u8 cpu_bufnum;
 
     cpu_bufnum = xgene_start_cpu_bufnum(pdata);
 
@@ -1103,7 +1320,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
         owner = xgene_derive_ring_owner(pdata);
         ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
         buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
-                                               RING_CFGSIZE_2KB,
+                                               RING_CFGSIZE_16KB,
                                                ring_id);
         if (!buf_pool) {
             ret = -ENOMEM;
@@ -1111,7 +1328,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
         }
 
         rx_ring->nbufpool = NUM_BUFPOOL;
-        rx_ring->buf_pool = buf_pool;
+        rx_ring->npagepool = NUM_NXTBUFPOOL;
         rx_ring->irq = pdata->irqs[i];
         buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
                                         sizeof(struct sk_buff *),
@@ -1124,6 +1341,42 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 
         buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
+        rx_ring->buf_pool = buf_pool;
         pdata->rx_ring[i] = rx_ring;
+
+        if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
+            (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {
+            break;
+        }
+
+        /* allocate next buffer pool for jumbo packets */
+        owner = xgene_derive_ring_owner(pdata);
+        ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
+        page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
+                                                RING_CFGSIZE_16KB,
+                                                ring_id);
+        if (!page_pool) {
+            ret = -ENOMEM;
+            goto err;
+        }
+
+        slots = page_pool->slots;
+        page_pool->frag_page = devm_kcalloc(dev, slots,
+                                            sizeof(struct page *),
+                                            GFP_KERNEL);
+        if (!page_pool->frag_page) {
+            ret = -ENOMEM;
+            goto err;
+        }
+
+        page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
+                                                sizeof(dma_addr_t),
+                                                GFP_KERNEL);
+        if (!page_pool->frag_dma_addr) {
+            ret = -ENOMEM;
+            goto err;
+        }
+
+        page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
+        rx_ring->page_pool = page_pool;
     }
 
     for (i = 0; i < pdata->txq_cnt; i++) {
@@ -1247,12 +1500,31 @@ static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
     return ret;
 }
 
+static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
+{
+    struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+    int frame_size;
+
+    if (!netif_running(ndev))
+        return 0;
+
+    frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
+
+    xgene_enet_close(ndev);
+    ndev->mtu = new_mtu;
+    pdata->mac_ops->set_framesize(pdata, frame_size);
+    xgene_enet_open(ndev);
+
+    return 0;
+}
+
 static const struct net_device_ops xgene_ndev_ops = {
     .ndo_open = xgene_enet_open,
     .ndo_stop = xgene_enet_close,
     .ndo_start_xmit = xgene_enet_start_xmit,
     .ndo_tx_timeout = xgene_enet_timeout,
     .ndo_get_stats64 = xgene_enet_get_stats64,
+    .ndo_change_mtu = xgene_change_mtu,
     .ndo_set_mac_address = xgene_enet_set_mac_address,
 };
 
@@ -1518,10 +1790,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 {
     struct xgene_enet_cle *enet_cle = &pdata->cle;
+    struct xgene_enet_desc_ring *page_pool;
     struct net_device *ndev = pdata->ndev;
     struct xgene_enet_desc_ring *buf_pool;
-    u16 dst_ring_num;
+    u16 dst_ring_num, ring_id;
     int i, ret;
+    u32 count;
 
     ret = pdata->port_ops->reset(pdata);
     if (ret)
@@ -1537,9 +1811,18 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
     for (i = 0; i < pdata->rxq_cnt; i++) {
         buf_pool = pdata->rx_ring[i]->buf_pool;
         xgene_enet_init_bufpool(buf_pool);
-        ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
+        page_pool = pdata->rx_ring[i]->page_pool;
+        xgene_enet_init_bufpool(page_pool);
+
+        count = pdata->rx_buff_cnt;
+        ret = xgene_enet_refill_bufpool(buf_pool, count);
         if (ret)
             goto err;
+
+        ret = xgene_enet_refill_pagepool(page_pool, count);
+        if (ret)
+            goto err;
+
     }
 
     dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
@@ -1558,10 +1841,17 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
             netdev_err(ndev, "Preclass Tree init error\n");
             goto err;
         }
+
     } else {
-        pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
+        dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
+        buf_pool = pdata->rx_ring[0]->buf_pool;
+        page_pool = pdata->rx_ring[0]->page_pool;
+        ring_id = (page_pool) ? page_pool->id : 0;
+        pdata->port_ops->cle_bypass(pdata, dst_ring_num,
+                                    buf_pool->id, ring_id);
     }
 
+    ndev->max_mtu = XGENE_ENET_MAX_MTU;
     pdata->phy_speed = SPEED_UNKNOWN;
     pdata->mac_ops->init(pdata);
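
xgene_change_mtu() programs the MAC frame limit as the MTU plus the 18
bytes of Ethernet overhead (14-byte header + 4-byte FCS) for jumbo MTUs,
and a fixed 0x600 (1536) otherwise. Restated standalone:

    #include <stdio.h>

    #define ETH_DATA_LEN 1500

    static int mtu_to_frame_size(int mtu)
    {
        return (mtu > ETH_DATA_LEN) ? mtu + 18 : 0x600;
    }

    int main(void)
    {
        printf("%d -> %d\n", 1500, mtu_to_frame_size(1500)); /* 1536 */
        printf("%d -> %d\n", 9000, mtu_to_frame_size(9000)); /* 9018 */
        return 0;
    }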
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 011965b54d1f..52571741da9f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -41,11 +41,14 @@
 #include "../../../phy/mdio-xgene.h"
 
 #define XGENE_DRV_VERSION   "v1.0"
-#define XGENE_ENET_MAX_MTU  1536
-#define SKB_BUFFER_SIZE     (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
+#define XGENE_ENET_STD_MTU  1536
+#define XGENE_ENET_MAX_MTU  9600
+#define SKB_BUFFER_SIZE     (XGENE_ENET_STD_MTU - NET_IP_ALIGN)
+
 #define BUFLEN_16K  (16 * 1024)
-#define NUM_PKT_BUF 64
+#define NUM_PKT_BUF 1024
 #define NUM_BUFPOOL 32
+#define NUM_NXTBUFPOOL  8
 #define MAX_EXP_BUFFS   256
 #define NUM_MSS_REG 4
 #define XGENE_MIN_ENET_FRAME_SIZE   60
@@ -88,6 +91,12 @@ enum xgene_enet_id {
     XGENE_ENET2
 };
 
+enum xgene_enet_buf_len {
+    SIZE_2K = 2048,
+    SIZE_4K = 4096,
+    SIZE_16K = 16384
+};
+
 /* software context of a descriptor ring */
 struct xgene_enet_desc_ring {
     struct net_device *ndev;
@@ -107,14 +116,18 @@ struct xgene_enet_desc_ring {
     dma_addr_t irq_mbox_dma;
     void *irq_mbox_addr;
     u16 dst_ring_num;
-    u8 nbufpool;
+    u16 nbufpool;
+    int npagepool;
     u8 index;
+    u32 flags;
     struct sk_buff *(*rx_skb);
     struct sk_buff *(*cp_skb);
     dma_addr_t *frag_dma_addr;
+    struct page *(*frag_page);
     enum xgene_enet_ring_cfgsize cfgsize;
     struct xgene_enet_desc_ring *cp_ring;
     struct xgene_enet_desc_ring *buf_pool;
+    struct xgene_enet_desc_ring *page_pool;
     struct napi_struct napi;
     union {
         void *desc_addr;
@@ -143,8 +156,12 @@ struct xgene_mac_ops {
     void (*rx_disable)(struct xgene_enet_pdata *pdata);
     void (*set_speed)(struct xgene_enet_pdata *pdata);
     void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+    void (*set_framesize)(struct xgene_enet_pdata *pdata, int framesize);
     void (*set_mss)(struct xgene_enet_pdata *pdata, u16 mss, u8 index);
     void (*link_state)(struct work_struct *work);
+    void (*enable_tx_pause)(struct xgene_enet_pdata *pdata, bool enable);
+    void (*flowctl_rx)(struct xgene_enet_pdata *pdata, bool enable);
+    void (*flowctl_tx)(struct xgene_enet_pdata *pdata, bool enable);
 };
 
 struct xgene_port_ops {
@@ -152,7 +169,7 @@ struct xgene_port_ops {
     void (*clear)(struct xgene_enet_pdata *pdata,
                   struct xgene_enet_desc_ring *ring);
     void (*cle_bypass)(struct xgene_enet_pdata *pdata,
-                       u32 dst_ring_num, u16 bufpool_id);
+                       u32 dst_ring_num, u16 bufpool_id, u16 nxtbufpool_id);
     void (*shutdown)(struct xgene_enet_pdata *pdata);
 };
 
@@ -220,6 +237,9 @@ struct xgene_enet_pdata {
     bool mdio_driver;
     struct gpio_desc *sfp_rdy;
    bool sfp_gpio_en;
+    u32 pause_autoneg;
+    bool tx_pause;
+    bool rx_pause;
 };
 
 struct xgene_indirect_ctl {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
index af51dd5844ce..4ff40559f970 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -119,6 +119,7 @@ static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
 
     ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
     ring_id_buf |= PREFETCH_BUF_EN;
+
     if (is_bufpool)
         ring_id_buf |= IS_BUFFER_POOL;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index d12e9cbae820..a8e063bdee3b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -343,6 +343,11 @@ static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p)
     xgene_enet_wr_mcx_csr(p, icm2_addr, icm2);
 }
 
+static void xgene_sgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
+{
+    xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
+}
+
 static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
 {
     u32 data, loop = 10;
@@ -360,11 +365,39 @@ static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
         netdev_err(p->ndev, "Auto-negotiation failed\n");
 }
 
+static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
+{
+    u32 data;
+
+    data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
+
+    if (set)
+        data |= bits;
+    else
+        data &= ~bits;
+
+    xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
+}
+
+static void xgene_sgmac_flowctl_tx(struct xgene_enet_pdata *p, bool enable)
+{
+    xgene_sgmac_rxtx(p, TX_FLOW_EN, enable);
+
+    p->mac_ops->enable_tx_pause(p, enable);
+}
+
+static void xgene_sgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
+{
+    xgene_sgmac_rxtx(pdata, RX_FLOW_EN, enable);
+}
+
 static void xgene_sgmac_init(struct xgene_enet_pdata *p)
 {
+    u32 pause_thres_reg, pause_off_thres_reg;
     u32 enet_spare_cfg_reg, rsif_config_reg;
     u32 cfg_bypass_reg, rx_dv_gate_reg;
-    u32 data, offset;
+    u32 data, data1, data2, offset;
+    u32 multi_dpf_reg;
 
     if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver))
         xgene_sgmac_reset(p);
@@ -400,24 +433,50 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
     data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
     xgene_enet_wr_csr(p, rsif_config_reg, data);
 
-    /* Bypass traffic gating */
-    xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
-    xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
-    xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
-}
+    /* Configure HW pause frame generation */
+    multi_dpf_reg = (p->enet_id == XGENE_ENET1) ? CSR_MULTI_DPF0_ADDR :
+                    XG_MCX_MULTI_DPF0_ADDR;
+    data = xgene_enet_rd_mcx_csr(p, multi_dpf_reg);
+    data = (DEF_QUANTA << 16) | (data & 0xffff);
+    xgene_enet_wr_mcx_csr(p, multi_dpf_reg, data);
+
+    if (p->enet_id != XGENE_ENET1) {
+        data = xgene_enet_rd_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR);
+        data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
+        xgene_enet_wr_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR, data);
+    }
 
-static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
-{
-    u32 data;
+    pause_thres_reg = (p->enet_id == XGENE_ENET1) ? RXBUF_PAUSE_THRESH :
+                      XG_RXBUF_PAUSE_THRESH;
+    pause_off_thres_reg = (p->enet_id == XGENE_ENET1) ?
+                          RXBUF_PAUSE_OFF_THRESH : 0;
 
-    data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
+    if (p->enet_id == XGENE_ENET1) {
+        data1 = xgene_enet_rd_csr(p, pause_thres_reg);
+        data2 = xgene_enet_rd_csr(p, pause_off_thres_reg);
+
+        if (!(p->port_id % 2)) {
+            data1 = (data1 & 0xffff0000) | DEF_PAUSE_THRES;
+            data2 = (data2 & 0xffff0000) | DEF_PAUSE_OFF_THRES;
+        } else {
+            data1 = (data1 & 0xffff) | (DEF_PAUSE_THRES << 16);
+            data2 = (data2 & 0xffff) | (DEF_PAUSE_OFF_THRES << 16);
+        }
 
-    if (set)
-        data |= bits;
-    else
-        data &= ~bits;
+        xgene_enet_wr_csr(p, pause_thres_reg, data1);
+        xgene_enet_wr_csr(p, pause_off_thres_reg, data2);
+    } else {
+        data = (DEF_PAUSE_OFF_THRES << 16) | DEF_PAUSE_THRES;
+        xgene_enet_wr_csr(p, pause_thres_reg, data);
+    }
 
-    xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
+    xgene_sgmac_flowctl_tx(p, p->tx_pause);
+    xgene_sgmac_flowctl_rx(p, p->rx_pause);
+
+    /* Bypass traffic gating */
+    xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
+    xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
+    xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
 }
 
 static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
@@ -484,11 +543,12 @@ static int xgene_enet_reset(struct xgene_enet_pdata *p)
 }
 
 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
-                                  u32 dst_ring_num, u16 bufpool_id)
+                                  u32 dst_ring_num, u16 bufpool_id,
+                                  u16 nxtbufpool_id)
 {
-    u32 data, fpsel;
     u32 cle_bypass_reg0, cle_bypass_reg1;
     u32 offset = p->port_id * MAC_OFFSET;
+    u32 data, fpsel, nxtfpsel;
 
     if (p->enet_id == XGENE_ENET1) {
         cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
@@ -501,24 +561,24 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
     data = CFG_CLE_BYPASS_EN0;
     xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
 
-    fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
-    data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
+    fpsel = xgene_enet_get_fpsel(bufpool_id);
+    nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
+    data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel) |
+           CFG_CLE_NXTFPSEL0(nxtfpsel);
     xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
 }
 
 static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
                              struct xgene_enet_desc_ring *ring)
 {
-    u32 addr, val, data;
-
-    val = xgene_enet_ring_bufnum(ring->id);
+    u32 addr, data;
 
     if (xgene_enet_is_bufpool(ring->id)) {
         addr = ENET_CFGSSQMIFPRESET_ADDR;
-        data = BIT(val - 0x20);
+        data = BIT(xgene_enet_get_fpsel(ring->id));
     } else {
         addr = ENET_CFGSSQMIWQRESET_ADDR;
-        data = BIT(val);
+        data = BIT(xgene_enet_ring_bufnum(ring->id));
     }
 
     xgene_enet_wr_ring_if(pdata, addr, data);
@@ -528,24 +588,23 @@ static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
 {
     struct device *dev = &p->pdev->dev;
     struct xgene_enet_desc_ring *ring;
-    u32 pb, val;
+    u32 pb;
    int i;
 
     pb = 0;
     for (i = 0; i < p->rxq_cnt; i++) {
         ring = p->rx_ring[i]->buf_pool;
-
-        val = xgene_enet_ring_bufnum(ring->id);
-        pb |= BIT(val - 0x20);
+        pb |= BIT(xgene_enet_get_fpsel(ring->id));
+        ring = p->rx_ring[i]->page_pool;
+        if (ring)
+            pb |= BIT(xgene_enet_get_fpsel(ring->id));
     }
     xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb);
 
     pb = 0;
     for (i = 0; i < p->txq_cnt; i++) {
         ring = p->tx_ring[i];
-
-        val = xgene_enet_ring_bufnum(ring->id);
-        pb |= BIT(val);
+        pb |= BIT(xgene_enet_ring_bufnum(ring->id));
     }
     xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQRESET_ADDR, pb);
 
@@ -586,6 +645,25 @@ static void xgene_enet_link_state(struct work_struct *work)
     schedule_delayed_work(&p->link_work, poll_interval);
 }
 
+static void xgene_sgmac_enable_tx_pause(struct xgene_enet_pdata *p, bool enable)
+{
+    u32 data, ecm_cfg_addr;
+
+    if (p->enet_id == XGENE_ENET1) {
+        ecm_cfg_addr = (!(p->port_id % 2)) ? CSR_ECM_CFG_0_ADDR :
+                       CSR_ECM_CFG_1_ADDR;
+    } else {
+        ecm_cfg_addr = XG_MCX_ECM_CFG_0_ADDR;
+    }
+
+    data = xgene_enet_rd_mcx_csr(p, ecm_cfg_addr);
+    if (enable)
+        data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
+    else
+        data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
+    xgene_enet_wr_mcx_csr(p, ecm_cfg_addr, data);
+}
+
 const struct xgene_mac_ops xgene_sgmac_ops = {
     .init           = xgene_sgmac_init,
     .reset          = xgene_sgmac_reset,
@@ -595,7 +673,11 @@ const struct xgene_mac_ops xgene_sgmac_ops = {
     .tx_disable     = xgene_sgmac_tx_disable,
     .set_speed      = xgene_sgmac_set_speed,
     .set_mac_addr   = xgene_sgmac_set_mac_addr,
-    .link_state     = xgene_enet_link_state
+    .set_framesize  = xgene_sgmac_set_frame_size,
+    .link_state     = xgene_enet_link_state,
+    .enable_tx_pause = xgene_sgmac_enable_tx_pause,
+    .flowctl_tx     = xgene_sgmac_flowctl_tx,
+    .flowctl_rx     = xgene_sgmac_flowctl_rx
 };
 
 const struct xgene_port_ops xgene_sgport_ops = {
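
On XGENE_ENET1 two SGMII ports share each 32-bit pause threshold
register, so xgene_sgmac_init() packs the even port's threshold into the
low halfword and the odd port's into the high halfword. The same packing
restated standalone (illustrative only):

    #include <stdint.h>

    static uint32_t pack_thresh(uint32_t reg, unsigned int port_id,
                                uint16_t thresh)
    {
        if (!(port_id % 2))     /* even port: low 16 bits */
            return (reg & 0xffff0000u) | thresh;

        /* odd port: high 16 bits */
        return (reg & 0x0000ffffu) | ((uint32_t)thresh << 16);
    }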
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index d1758b072623..ece19e6d68e3 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -101,6 +101,14 @@ static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
                    wr_addr);
 }
 
+static void xgene_enet_wr_axg_csr(struct xgene_enet_pdata *pdata,
+                                  u32 offset, u32 val)
+{
+    void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+    iowrite32(val, addr);
+}
+
 static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
                               u32 offset, u32 *val)
 {
@@ -174,6 +182,14 @@ static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
     return success;
 }
 
+static void xgene_enet_rd_axg_csr(struct xgene_enet_pdata *pdata,
+                                  u32 offset, u32 *val)
+{
+    void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+    *val = ioread32(addr);
+}
+
 static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
 {
     struct net_device *ndev = pdata->ndev;
@@ -250,6 +266,12 @@ static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
     xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
 }
 
+static void xgene_xgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
+{
+    xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR,
+                      ((((size + 2) >> 2) << 16) | size));
+}
+
 static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
 {
     u32 data;
@@ -259,6 +281,51 @@ static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
     return data;
 }
 
+static void xgene_xgmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
+                                        bool enable)
+{
+    u32 data;
+
+    xgene_enet_rd_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, &data);
+
+    if (enable)
+        data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
+    else
+        data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
+
+    xgene_enet_wr_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, data);
+}
+
+static void xgene_xgmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
+{
+    u32 data;
+
+    xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+
+    if (enable)
+        data |= HSTTCTLEN;
+    else
+        data &= ~HSTTCTLEN;
+
+    xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
+
+    pdata->mac_ops->enable_tx_pause(pdata, enable);
+}
+
+static void xgene_xgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
+{
+    u32 data;
+
+    xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+
+    if (enable)
+        data |= HSTRCTLEN;
+    else
+        data &= ~HSTRCTLEN;
+
+    xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
+}
+
 static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
 {
     u32 data;
@@ -282,6 +349,23 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
     xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
     xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
     xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
+
+    /* Configure HW pause frame generation */
+    xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, &data);
+    data = (DEF_QUANTA << 16) | (data & 0xFFFF);
+    xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, data);
+
+    if (pdata->enet_id != XGENE_ENET1) {
+        xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, &data);
+        data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
+        xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, data);
+    }
+
+    data = (XG_DEF_PAUSE_OFF_THRES << 16) | XG_DEF_PAUSE_THRES;
+    xgene_enet_wr_csr(pdata, XG_RXBUF_PAUSE_THRESH, data);
+
+    xgene_xgmac_flowctl_tx(pdata, pdata->tx_pause);
+    xgene_xgmac_flowctl_rx(pdata, pdata->rx_pause);
 }
 
 static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
@@ -350,44 +434,47 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 }
 
 static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
-                                    u32 dst_ring_num, u16 bufpool_id)
+                                    u32 dst_ring_num, u16 bufpool_id,
+                                    u16 nxtbufpool_id)
 {
-    u32 cb, fpsel;
+    u32 cb, fpsel, nxtfpsel;
 
     xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
     cb |= CFG_CLE_BYPASS_EN0;
     CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
     xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);
 
-    fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
+    fpsel = xgene_enet_get_fpsel(bufpool_id);
+    nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
     xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
     CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
     CFG_CLE_FPSEL0_SET(&cb, fpsel);
+    CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
     xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
+    pr_info("+ cle_bypass: fpsel: %d nxtfpsel: %d\n", fpsel, nxtfpsel);
 }
 
 static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
 {
     struct device *dev = &pdata->pdev->dev;
     struct xgene_enet_desc_ring *ring;
-    u32 pb, val;
+    u32 pb;
     int i;
 
     pb = 0;
     for (i = 0; i < pdata->rxq_cnt; i++) {
         ring = pdata->rx_ring[i]->buf_pool;
-
-        val = xgene_enet_ring_bufnum(ring->id);
-        pb |= BIT(val - 0x20);
+        pb |= BIT(xgene_enet_get_fpsel(ring->id));
+        ring = pdata->rx_ring[i]->page_pool;
+        if (ring)
+            pb |= BIT(xgene_enet_get_fpsel(ring->id));
     }
     xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
 
     pb = 0;
     for (i = 0; i < pdata->txq_cnt; i++) {
         ring = pdata->tx_ring[i];
-
-        val = xgene_enet_ring_bufnum(ring->id);
-        pb |= BIT(val);
+        pb |= BIT(xgene_enet_ring_bufnum(ring->id));
     }
     xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
 
@@ -400,16 +487,14 @@ static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
 static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
                              struct xgene_enet_desc_ring *ring)
 {
-    u32 addr, val, data;
-
-    val = xgene_enet_ring_bufnum(ring->id);
+    u32 addr, data;
 
     if (xgene_enet_is_bufpool(ring->id)) {
         addr = ENET_CFGSSQMIFPRESET_ADDR;
-        data = BIT(val - 0x20);
+        data = BIT(xgene_enet_get_fpsel(ring->id));
     } else {
         addr = ENET_CFGSSQMIWQRESET_ADDR;
-        data = BIT(val);
+        data = BIT(xgene_enet_ring_bufnum(ring->id));
     }
 
     xgene_enet_wr_ring_if(pdata, addr, data);
@@ -473,8 +558,12 @@ const struct xgene_mac_ops xgene_xgmac_ops = {
     .rx_disable = xgene_xgmac_rx_disable,
     .tx_disable = xgene_xgmac_tx_disable,
     .set_mac_addr = xgene_xgmac_set_mac_addr,
+    .set_framesize = xgene_xgmac_set_frame_size,
     .set_mss = xgene_xgmac_set_mss,
-    .link_state = xgene_enet_link_state
+    .link_state = xgene_enet_link_state,
+    .enable_tx_pause = xgene_xgmac_enable_tx_pause,
+    .flowctl_rx = xgene_xgmac_flowctl_rx,
+    .flowctl_tx = xgene_xgmac_flowctl_tx
 };
 
 const struct xgene_port_ops xgene_xgport_ops = {
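
xgene_xgmac_set_frame_size() packs HSTMAXFRAME_LENGTH with the byte limit
in the low halfword and the same limit in 4-byte units (the +2 biasing
the count upward) in the high halfword. Restated standalone
(illustrative only):

    #include <stdint.h>

    static uint32_t hstmaxframe(uint32_t size)
    {
        uint32_t words = (size + 2) >> 2;   /* frame size in 4-byte units */

        return (words << 16) | size;
    }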
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 360ccbd95566..03b847ad8937 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -59,6 +59,11 @@
 #define HSTMAXFRAME_LENGTH_ADDR     0x0020
 
 #define XG_MCX_RX_DV_GATE_REG_0_ADDR    0x0004
+#define XG_MCX_ECM_CFG_0_ADDR           0x0074
+#define XG_MCX_MULTI_DPF0_ADDR          0x007c
+#define XG_MCX_MULTI_DPF1_ADDR          0x0080
+#define XG_DEF_PAUSE_THRES              0x390
+#define XG_DEF_PAUSE_OFF_THRES          0x2c0
 #define XG_RSIF_CONFIG_REG_ADDR         0x00a0
 #define XCLE_BYPASS_REG0_ADDR           0x0160
 #define XCLE_BYPASS_REG1_ADDR           0x0164
@@ -70,6 +75,10 @@
 #define XG_ENET_SPARE_CFG_REG_ADDR      0x040c
 #define XG_ENET_SPARE_CFG_REG_1_ADDR    0x0410
 #define XGENET_RX_DV_GATE_REG_0_ADDR    0x0804
+#define XGENET_CSR_ECM_CFG_0_ADDR       0x0880
+#define XGENET_CSR_MULTI_DPF0_ADDR      0x0888
+#define XGENET_CSR_MULTI_DPF1_ADDR      0x088c
+#define XG_RXBUF_PAUSE_THRESH           0x0020
 #define XG_MCX_ICM_CONFIG0_REG_0_ADDR   0x00e0
 #define XG_MCX_ICM_CONFIG2_REG_0_ADDR   0x00e8