-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.c         | 100
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.h         |  24
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 255
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h |   8
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h     | 216
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c   |   2
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c     |  10
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h     |   8
8 files changed, 482 insertions, 141 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index fead64f1ad90..4bbfea147d98 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1766,7 +1766,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = -EIO; if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { - bnapi->cp_ring.rx_buf_errors++; + bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); @@ -1849,7 +1849,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } else { if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { if (dev->features & NETIF_F_RXCSUM) - bnapi->cp_ring.rx_l4_csum_errors++; + bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; } } @@ -5045,8 +5045,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); req.lb_rule = cpu_to_le16(0xffff); vnic_mru: - req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + - VLAN_HLEN); + req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); #ifdef CONFIG_BNXT_SRIOV @@ -5356,9 +5355,9 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, { if (bp->flags & BNXT_FLAG_CHIP_P5) { if (BNXT_PF(bp)) - db->doorbell = bp->bar1 + 0x10000; + db->doorbell = bp->bar1 + DB_PF_OFFSET_P5; else - db->doorbell = bp->bar1 + 0x4000; + db->doorbell = bp->bar1 + DB_VF_OFFSET_P5; switch (ring_type) { case HWRM_RING_ALLOC_TX: db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; @@ -6365,6 +6364,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) { struct hwrm_func_qcfg_input req = {0}; struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + u32 min_db_offset = 0; u16 flags; int rc; @@ -6413,6 +6413,21 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) if (!bp->max_mtu) bp->max_mtu = BNXT_MAX_MTU; + if (bp->db_size) + goto func_qcfg_exit; + + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (BNXT_PF(bp)) + min_db_offset = DB_PF_OFFSET_P5; + else + min_db_offset = DB_VF_OFFSET_P5; + } + bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * + 1024); + if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || + bp->db_size <= min_db_offset) + bp->db_size = pci_resource_len(bp->pdev, 2); + func_qcfg_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -6434,23 +6449,13 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) if (!rc) { struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; - int i; + int i, tqm_rings; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { rc = -ENOMEM; goto ctx_err; } - ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL); - if (!ctx_pg) { - kfree(ctx); - rc = -ENOMEM; - goto ctx_err; - } - for (i = 0; i < bp->max_q + 1; i++, ctx_pg++) - ctx->tqm_mem[i] = ctx_pg; - - bp->ctx = ctx; ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries); ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); @@ -6483,6 +6488,20 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); ctx->ctx_kind_initializer = resp->ctx_kind_initializer; + ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; + if (!ctx->tqm_fp_rings_count) + ctx->tqm_fp_rings_count = bp->max_q; + + 
tqm_rings = ctx->tqm_fp_rings_count + 1; + ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL); + if (!ctx_pg) { + kfree(ctx); + rc = -ENOMEM; + goto ctx_err; + } + for (i = 0; i < tqm_rings; i++, ctx_pg++) + ctx->tqm_mem[i] = ctx_pg; + bp->ctx = ctx; } else { rc = 0; } @@ -6735,7 +6754,7 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) return; if (ctx->tqm_mem[0]) { - for (i = 0; i < bp->max_q + 1; i++) + for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); kfree(ctx->tqm_mem[0]); ctx->tqm_mem[0] = NULL; @@ -6756,6 +6775,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; u32 mem_size, ena, entries; + u32 entries_sp, min; u32 num_mr, num_ah; u32 extra_srqs = 0; u32 extra_qps = 0; @@ -6845,14 +6865,17 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; skip_rdma: - entries = ctx->qp_max_l2_entries + extra_qps; + min = ctx->tqm_min_entries_per_ring; + entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries + + 2 * (extra_qps + ctx->qp_min_qp1_entries) + min; + entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple); + entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries; entries = roundup(entries, ctx->tqm_entries_multiple); - entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, - ctx->tqm_max_entries_per_ring); - for (i = 0; i < bp->max_q + 1; i++) { + entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring); + for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) { ctx_pg = ctx->tqm_mem[i]; - ctx_pg->entries = entries; - mem_size = ctx->tqm_entry_size * entries; + ctx_pg->entries = i ? entries : entries_sp; + mem_size = ctx->tqm_entry_size * ctx_pg->entries; rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); if (rc) return rc; @@ -10262,7 +10285,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp) bnxt_dbg_hwrm_ring_info_get(bp, DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, fw_ring_id, &val[0], &val[1]); - cpr->missed_irqs++; + cpr->sw_stats.cmn.missed_irqs++; } } } @@ -10891,6 +10914,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->dev = dev; bp->pdev = pdev; + /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() + * determines the BAR size. 
+ */ bp->bar0 = pci_ioremap_bar(pdev, 0); if (!bp->bar0) { dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); @@ -10898,13 +10924,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) goto init_err_release; } - bp->bar1 = pci_ioremap_bar(pdev, 2); - if (!bp->bar1) { - dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); - rc = -ENOMEM; - goto init_err_release; - } - bp->bar2 = pci_ioremap_bar(pdev, 4); if (!bp->bar2) { dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); @@ -11826,6 +11845,16 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) return 0; } +static int bnxt_map_db_bar(struct bnxt *bp) +{ + if (!bp->db_size) + return -ENODEV; + bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); + if (!bp->bar1) + return -ENOMEM; + return 0; +} + static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; @@ -11886,6 +11915,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; + rc = bnxt_map_db_bar(bp); + if (rc) { + dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", + rc); + goto init_err_pci_clean; + } + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index f2caa2756f5b..c15517ff7ff6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -537,6 +537,9 @@ struct nqe_cn { #define DBR_TYPE_NQ_ARM (0xbULL << 60) #define DBR_TYPE_NULL (0xfULL << 60) +#define DB_PF_OFFSET_P5 0x10000 +#define DB_VF_OFFSET_P5 0x4000 + #define INVALID_HW_RING_ID ((u16)-1) /* The hardware supports certain page sizes. Use the supported page sizes @@ -907,6 +910,20 @@ struct bnxt_rx_ring_info { struct page_pool *page_pool; }; +struct bnxt_rx_sw_stats { + u64 rx_l4_csum_errors; + u64 rx_buf_errors; +}; + +struct bnxt_cmn_sw_stats { + u64 missed_irqs; +}; + +struct bnxt_sw_stats { + struct bnxt_rx_sw_stats rx; + struct bnxt_cmn_sw_stats cmn; +}; + struct bnxt_cp_ring_info { struct bnxt_napi *bnapi; u32 cp_raw_cons; @@ -934,9 +951,8 @@ struct bnxt_cp_ring_info { struct ctx_hw_stats *hw_stats; dma_addr_t hw_stats_map; u32 hw_stats_ctx_id; - u64 rx_l4_csum_errors; - u64 rx_buf_errors; - u64 missed_irqs; + + struct bnxt_sw_stats sw_stats; struct bnxt_ring_struct cp_ring_struct; @@ -1357,6 +1373,7 @@ struct bnxt_ctx_mem_info { u16 mrav_num_entries_units; u8 tqm_entries_multiple; u8 ctx_kind_initializer; + u8 tqm_fp_rings_count; u32 flags; #define BNXT_CTX_FLAG_INITED 0x01 @@ -1816,6 +1833,7 @@ struct bnxt { /* ensure atomic 64-bit doorbell writes on 32-bit systems. 
*/ spinlock_t db_lock; #endif + int db_size; #define BNXT_NTP_FLTR_MAX_FLTR 4096 #define BNXT_NTP_FLTR_HASH_SIZE 512 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 34046a6286e8..07526868f7be 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -137,7 +137,7 @@ reset_coalesce: return rc; } -static const char * const bnxt_ring_stats_str[] = { +static const char * const bnxt_ring_rx_stats_str[] = { "rx_ucast_packets", "rx_mcast_packets", "rx_bcast_packets", @@ -146,6 +146,9 @@ static const char * const bnxt_ring_stats_str[] = { "rx_ucast_bytes", "rx_mcast_bytes", "rx_bcast_bytes", +}; + +static const char * const bnxt_ring_tx_stats_str[] = { "tx_ucast_packets", "tx_mcast_packets", "tx_bcast_packets", @@ -171,9 +174,12 @@ static const char * const bnxt_ring_tpa2_stats_str[] = { "rx_tpa_errors", }; -static const char * const bnxt_ring_sw_stats_str[] = { +static const char * const bnxt_rx_sw_stats_str[] = { "rx_l4_csum_errors", "rx_buf_errors", +}; + +static const char * const bnxt_cmn_sw_stats_str[] = { "missed_irqs", }; @@ -303,6 +309,11 @@ static struct { {0, "tx_total_discard_pkts"}, }; +#define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) +#define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str) +#define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str) +#define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str) + static const struct { long offset; char string[ETH_GSTRING_LEN]; @@ -482,12 +493,21 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) static int bnxt_get_num_ring_stats(struct bnxt *bp) { - int num_stats; + int rx, tx, cmn; + bool sh = false; + + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + sh = true; - num_stats = ARRAY_SIZE(bnxt_ring_stats_str) + - ARRAY_SIZE(bnxt_ring_sw_stats_str) + - bnxt_get_num_tpa_ring_stats(bp); - return num_stats * bp->cp_nr_rings; + rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS + + bnxt_get_num_tpa_ring_stats(bp); + tx = NUM_RING_TX_HW_STATS; + cmn = NUM_RING_CMN_SW_STATS; + if (sh) + return (rx + tx + cmn) * bp->cp_nr_rings; + else + return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings + + cmn * bp->cp_nr_rings; } static int bnxt_get_num_stats(struct bnxt *bp) @@ -528,13 +548,29 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset) } } +static bool is_rx_ring(struct bnxt *bp, int ring_num) +{ + return ring_num < bp->rx_nr_rings; +} + +static bool is_tx_ring(struct bnxt *bp, int ring_num) +{ + int tx_base = 0; + + if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) + tx_base = bp->rx_nr_rings; + + if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings)) + return true; + return false; +} + static void bnxt_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *buf) { u32 i, j = 0; struct bnxt *bp = netdev_priv(dev); - u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) + - bnxt_get_num_tpa_ring_stats(bp); + u32 tpa_stats; if (!bp->bnapi) { j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS; @@ -544,17 +580,42 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) bnxt_sw_func_stats[i].counter = 0; + tpa_stats = bnxt_get_num_tpa_ring_stats(bp); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; __le64 *hw_stats = (__le64 *)cpr->hw_stats; + u64 *sw; int k; - for (k = 0; k < stat_fields; j++, k++) + if 
(is_rx_ring(bp, i)) { + for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++) + buf[j] = le64_to_cpu(hw_stats[k]); + } + if (is_tx_ring(bp, i)) { + k = NUM_RING_RX_HW_STATS; + for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; + j++, k++) + buf[j] = le64_to_cpu(hw_stats[k]); + } + if (!tpa_stats || !is_rx_ring(bp, i)) + goto skip_tpa_ring_stats; + + k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; + for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS + + tpa_stats; j++, k++) buf[j] = le64_to_cpu(hw_stats[k]); - buf[j++] = cpr->rx_l4_csum_errors; - buf[j++] = cpr->rx_buf_errors; - buf[j++] = cpr->missed_irqs; + +skip_tpa_ring_stats: + sw = (u64 *)&cpr->sw_stats.rx; + if (is_rx_ring(bp, i)) { + for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++) + buf[j] = sw[k]; + } + + sw = (u64 *)&cpr->sw_stats.cmn; + for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++) + buf[j] = sw[k]; bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += le64_to_cpu(cpr->hw_stats->rx_discard_pkts); @@ -632,31 +693,48 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) switch (stringset) { case ETH_SS_STATS: for (i = 0; i < bp->cp_nr_rings; i++) { - num_str = ARRAY_SIZE(bnxt_ring_stats_str); - for (j = 0; j < num_str; j++) { - sprintf(buf, "[%d]: %s", i, - bnxt_ring_stats_str[j]); - buf += ETH_GSTRING_LEN; + if (is_rx_ring(bp, i)) { + num_str = NUM_RING_RX_HW_STATS; + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_ring_rx_stats_str[j]); + buf += ETH_GSTRING_LEN; + } } - if (!BNXT_SUPPORTS_TPA(bp)) + if (is_tx_ring(bp, i)) { + num_str = NUM_RING_TX_HW_STATS; + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_ring_tx_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + } + num_str = bnxt_get_num_tpa_ring_stats(bp); + if (!num_str || !is_rx_ring(bp, i)) goto skip_tpa_stats; - if (bp->max_tpa_v2) { - num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str); + if (bp->max_tpa_v2) str = bnxt_ring_tpa2_stats_str; - } else { - num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str); + else str = bnxt_ring_tpa_stats_str; - } + for (j = 0; j < num_str; j++) { sprintf(buf, "[%d]: %s", i, str[j]); buf += ETH_GSTRING_LEN; } skip_tpa_stats: - num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str); + if (is_rx_ring(bp, i)) { + num_str = NUM_RING_RX_SW_STATS; + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_rx_sw_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + } + num_str = NUM_RING_CMN_SW_STATS; for (j = 0; j < num_str; j++) { sprintf(buf, "[%d]: %s", i, - bnxt_ring_sw_stats_str[j]); + bnxt_cmn_sw_stats_str[j]); buf += ETH_GSTRING_LEN; } } @@ -1749,8 +1827,8 @@ static int bnxt_flash_nvram(struct net_device *dev, return rc; } -static int bnxt_firmware_reset(struct net_device *dev, - u16 dir_type) +static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, + u8 self_reset, u8 flags) { struct hwrm_fw_reset_input req = {0}; struct bnxt *bp = netdev_priv(dev); @@ -1758,48 +1836,77 @@ static int bnxt_firmware_reset(struct net_device *dev, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); + req.embedded_proc_type = proc_type; + req.selfrst_status = self_reset; + req.flags = flags; + + if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) { + rc = hwrm_send_message_silent(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + } else { + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == -EACCES) + bnxt_print_admin_err(bp); + } + return rc; +} + +static int bnxt_firmware_reset(struct net_device *dev, + enum bnxt_nvm_directory_type dir_type) +{ + u8 self_reset = 
FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE; + u8 proc_type, flags = 0; + /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ /* (e.g. when firmware isn't already running) */ switch (dir_type) { case BNX_DIR_TYPE_CHIMP_PATCH: case BNX_DIR_TYPE_BOOTCODE: case BNX_DIR_TYPE_BOOTCODE_2: - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; /* Self-reset ChiMP upon next PCIe reset: */ - req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; + self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; break; case BNX_DIR_TYPE_APE_FW: case BNX_DIR_TYPE_APE_PATCH: - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; /* Self-reset APE upon next PCIe reset: */ - req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; + self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; break; case BNX_DIR_TYPE_KONG_FW: case BNX_DIR_TYPE_KONG_PATCH: - req.embedded_proc_type = - FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; break; case BNX_DIR_TYPE_BONO_FW: case BNX_DIR_TYPE_BONO_PATCH: - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; - break; - case BNXT_FW_RESET_CHIP: - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; - req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; - if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) - req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; - break; - case BNXT_FW_RESET_AP: - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP; + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; break; default: return -EINVAL; } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc == -EACCES) - bnxt_print_admin_err(bp); - return rc; + return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags); +} + +static int bnxt_firmware_reset_chip(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + u8 flags = 0; + + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; + + return bnxt_hwrm_firmware_reset(dev, + FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP, + FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP, + flags); +} + +static int bnxt_firmware_reset_ap(struct net_device *dev) +{ + return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP, + FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE, + 0); } static int bnxt_flash_firmware(struct net_device *dev, @@ -2975,7 +3082,11 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, static int bnxt_reset(struct net_device *dev, u32 *flags) { struct bnxt *bp = netdev_priv(dev); - int rc = 0; + bool reload = false; + u32 req = *flags; + + if (!req) + return -EINVAL; if (!BNXT_PF(bp)) { netdev_err(dev, "Reset is not supported from a VF\n"); @@ -2989,33 +3100,37 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) return -EBUSY; } - if (*flags == ETH_RESET_ALL) { + if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) { /* This feature is not supported in older firmware versions */ - if (bp->hwrm_spec_code < 0x10803) - return -EOPNOTSUPP; - - rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP); - if (!rc) { - netdev_info(dev, "Reset request successful.\n"); - if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) - netdev_info(dev, "Reload driver to complete reset\n"); - *flags = 0; + if (bp->hwrm_spec_code >= 0x10803) { + if (!bnxt_firmware_reset_chip(dev)) { + netdev_info(dev, "Firmware reset request successful.\n"); + if (!(bp->fw_cap & 
BNXT_FW_CAP_HOT_RESET)) + reload = true; + *flags &= ~BNXT_FW_RESET_CHIP; + } + } else if (req == BNXT_FW_RESET_CHIP) { + return -EOPNOTSUPP; /* only request, fail hard */ } - } else if (*flags == ETH_RESET_AP) { - /* This feature is not supported in older firmware versions */ - if (bp->hwrm_spec_code < 0x10803) - return -EOPNOTSUPP; + } - rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP); - if (!rc) { - netdev_info(dev, "Reset Application Processor request successful.\n"); - *flags = 0; + if (req & BNXT_FW_RESET_AP) { + /* This feature is not supported in older firmware versions */ + if (bp->hwrm_spec_code >= 0x10803) { + if (!bnxt_firmware_reset_ap(dev)) { + netdev_info(dev, "Reset application processor successful.\n"); + reload = true; + *flags &= ~BNXT_FW_RESET_AP; + } + } else if (req == BNXT_FW_RESET_AP) { + return -EOPNOTSUPP; /* only request, fail hard */ } - } else { - rc = -EINVAL; } - return rc; + if (reload) + netdev_info(dev, "Reload driver to complete reset\n"); + + return 0; } static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 3576d951727b..ce7585ff9e4d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -77,8 +77,12 @@ struct hwrm_dbg_cmn_output { #define BNXT_LED_DFLT_ENABLES(x) \ cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) -#define BNXT_FW_RESET_AP 0xfffe -#define BNXT_FW_RESET_CHIP 0xffff +#define BNXT_FW_RESET_AP (ETH_RESET_AP << ETH_RESET_SHARED_SHIFT) +#define BNXT_FW_RESET_CHIP ((ETH_RESET_MGMT | ETH_RESET_IRQ | \ + ETH_RESET_DMA | ETH_RESET_FILTER | \ + ETH_RESET_OFFLOAD | ETH_RESET_MAC | \ + ETH_RESET_PHY | ETH_RESET_RAM) \ + << ETH_RESET_SHARED_SHIFT) extern const struct ethtool_ops bnxt_ethtool_ops; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 7cf27dffadb5..7e9235c8d21e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -2,7 +2,7 @@ * * Copyright (c) 2014-2016 Broadcom Corporation * Copyright (c) 2014-2018 Broadcom Limited - * Copyright (c) 2018-2019 Broadcom Inc. + * Copyright (c) 2018-2020 Broadcom Inc. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -207,6 +207,8 @@ struct cmd_nums { #define HWRM_PORT_PHY_MDIO_READ 0xb6UL #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL + #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL + #define HWRM_PORT_ECN_QSTATS 0xbaUL #define HWRM_FW_RESET 0xc0UL #define HWRM_FW_QSTATUS 0xc1UL #define HWRM_FW_HEALTH_CHECK 0xc2UL @@ -220,6 +222,8 @@ struct cmd_nums { #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL #define HWRM_FW_IPC_MAILBOX 0xccUL + #define HWRM_FW_ECN_CFG 0xcdUL + #define HWRM_FW_ECN_QCFG 0xceUL #define HWRM_EXEC_FWD_RESP 0xd0UL #define HWRM_REJECT_FWD_RESP 0xd1UL #define HWRM_FWD_RESP 0xd2UL @@ -233,6 +237,7 @@ struct cmd_nums { #define HWRM_TEMP_MONITOR_QUERY 0xe0UL #define HWRM_REG_POWER_QUERY 0xe1UL #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL + #define HWRM_REG_POWER_HISTOGRAM 0xe3UL #define HWRM_WOL_FILTER_ALLOC 0xf0UL #define HWRM_WOL_FILTER_FREE 0xf1UL #define HWRM_WOL_FILTER_QCFG 0xf2UL @@ -331,6 +336,7 @@ struct cmd_nums { #define HWRM_FUNC_VF_BW_CFG 0x195UL #define HWRM_FUNC_VF_BW_QCFG 0x196UL #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL + #define HWRM_FUNC_QSTATS_EXT 0x198UL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -341,6 +347,31 @@ struct cmd_nums { #define HWRM_MFG_OTP_CFG 0x207UL #define HWRM_MFG_OTP_QCFG 0x208UL #define HWRM_MFG_HDMA_TEST 0x209UL + #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL + #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL + #define HWRM_TF 0x2bcUL + #define HWRM_TF_VERSION_GET 0x2bdUL + #define HWRM_TF_SESSION_OPEN 0x2c6UL + #define HWRM_TF_SESSION_ATTACH 0x2c7UL + #define HWRM_TF_SESSION_CLOSE 0x2c8UL + #define HWRM_TF_SESSION_QCFG 0x2c9UL + #define HWRM_TF_SESSION_RESC_QCAPS 0x2caUL + #define HWRM_TF_SESSION_RESC_ALLOC 0x2cbUL + #define HWRM_TF_SESSION_RESC_FREE 0x2ccUL + #define HWRM_TF_SESSION_RESC_FLUSH 0x2cdUL + #define HWRM_TF_TBL_TYPE_GET 0x2d0UL + #define HWRM_TF_TBL_TYPE_SET 0x2d1UL + #define HWRM_TF_CTXT_MEM_RGTR 0x2daUL + #define HWRM_TF_CTXT_MEM_UNRGTR 0x2dbUL + #define HWRM_TF_EXT_EM_QCAPS 0x2dcUL + #define HWRM_TF_EXT_EM_OP 0x2ddUL + #define HWRM_TF_EXT_EM_CFG 0x2deUL + #define HWRM_TF_EXT_EM_QCFG 0x2dfUL + #define HWRM_TF_TCAM_SET 0x2eeUL + #define HWRM_TF_TCAM_GET 0x2efUL + #define HWRM_TF_TCAM_MOVE 0x2f0UL + #define HWRM_TF_TCAM_FREE 0x2f1UL + #define HWRM_SV 0x400UL #define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_INDIRECT 0xff11UL #define HWRM_DBG_WRITE_DIRECT 0xff12UL @@ -356,6 +387,10 @@ struct cmd_nums { #define HWRM_DBG_RING_INFO_GET 0xff1cUL #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL + #define HWRM_DBG_DRV_TRACE 0xff1fUL + #define HWRM_DBG_QCAPS 0xff20UL + #define HWRM_DBG_QCFG 0xff21UL + #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL #define HWRM_NVM_VALIDATE_OPTION 0xffefUL #define HWRM_NVM_FLUSH 0xfff0UL @@ -429,8 +464,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 1 -#define HWRM_VERSION_RSVD 12 -#define HWRM_VERSION_STR "1.10.1.12" +#define HWRM_VERSION_RSVD 33 +#define HWRM_VERSION_STR "1.10.1.33" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -482,6 +517,7 @@ struct hwrm_ver_get_output { #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL #define 
VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL u8 roce_fw_maj_8b; u8 roce_fw_min_8b; u8 roce_fw_bld_8b; @@ -647,6 +683,7 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -1089,7 +1126,7 @@ struct hwrm_func_qcaps_input { u8 unused_0[6]; }; -/* hwrm_func_qcaps_output (size:640b/80B) */ +/* hwrm_func_qcaps_output (size:704b/88B) */ struct hwrm_func_qcaps_output { __le16 error_code; __le16 req_type; @@ -1126,6 +1163,10 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL + #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL + #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL + #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL + #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -1146,7 +1187,12 @@ struct hwrm_func_qcaps_output { __le32 max_flow_id; __le32 max_hw_ring_grps; __le16 max_sp_tx_rings; - u8 unused_0; + u8 unused_0[2]; + __le32 flags_ext; + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL + u8 unused_1[3]; u8 valid; }; @@ -1161,7 +1207,7 @@ struct hwrm_func_qcfg_input { u8 unused_0[6]; }; -/* hwrm_func_qcfg_output (size:704b/88B) */ +/* hwrm_func_qcfg_output (size:768b/96B) */ struct hwrm_func_qcfg_output { __le16 error_code; __le16 req_type; @@ -1267,7 +1313,11 @@ struct hwrm_func_qcfg_output { u8 always_1; __le32 reset_addr_poll; __le16 legacy_l2_db_size_kb; - u8 unused_2[1]; + __le16 svif_info; + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0 + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL + u8 unused_2[7]; u8 valid; }; @@ -1420,9 +1470,10 @@ struct hwrm_func_qstats_input { __le64 resp_addr; __le16 fid; u8 flags; - #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL - #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL - #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY + #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL + #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL + #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL + #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK u8 unused_0[5]; }; @@ -1456,6 +1507,53 @@ struct hwrm_func_qstats_output { u8 valid; }; +/* hwrm_func_qstats_ext_input (size:192b/24B) */ +struct hwrm_func_qstats_ext_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 flags; + #define FUNC_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL + #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL + #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL + #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK + u8 
unused_0[5]; +}; + +/* hwrm_func_qstats_ext_output (size:1472b/184B) */ +struct hwrm_func_qstats_ext_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_tpa_eligible_pkt; + __le64 rx_tpa_eligible_bytes; + __le64 rx_tpa_pkt; + __le64 rx_tpa_bytes; + __le64 rx_tpa_errors; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_func_clr_stats_input (size:192b/24B) */ struct hwrm_func_clr_stats_input { __le16 req_type; @@ -1808,7 +1906,7 @@ struct hwrm_func_backing_store_qcaps_output { u8 ctx_kind_initializer; __le32 rsvd; __le16 rsvd1; - u8 rsvd2; + u8 tqm_fp_rings_count; u8 valid; }; @@ -2231,7 +2329,17 @@ struct hwrm_error_recovery_qcfg_output { #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2 __le32 reset_reg_val[16]; u8 delay_after_reset[16]; - u8 unused_1[7]; + __le32 err_recovery_cnt_reg; + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2 + u8 unused_1[3]; u8 valid; }; @@ -2934,7 +3042,11 @@ struct hwrm_port_qstats_input { __le16 target_id; __le64 resp_addr; __le16 port_id; - u8 unused_0[6]; + u8 flags; + #define PORT_QSTATS_REQ_FLAGS_UNUSED 0x0UL + #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL + #define PORT_QSTATS_REQ_FLAGS_LAST PORT_QSTATS_REQ_FLAGS_COUNTER_MASK + u8 unused_0[5]; __le64 tx_stat_host_addr; __le64 rx_stat_host_addr; }; @@ -3058,7 +3170,11 @@ struct hwrm_port_qstats_ext_input { __le16 port_id; __le16 tx_stat_size; __le16 rx_stat_size; - u8 unused_0[2]; + u8 flags; + #define PORT_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL + #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL + #define PORT_QSTATS_EXT_REQ_FLAGS_LAST PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK + u8 unused_0; __le64 tx_stat_host_addr; __le64 rx_stat_host_addr; }; @@ -3840,14 +3956,22 @@ struct hwrm_queue_pfcenable_qcfg_output { __le16 seq_id; __le16 resp_len; __le32 flags; - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL 
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL u8 unused_0[3]; u8 valid; }; @@ -3860,14 +3984,22 @@ struct hwrm_queue_pfcenable_cfg_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL __le16 port_id; u8 unused_0[2]; }; @@ -5287,7 +5419,11 @@ struct hwrm_ring_cmpl_ring_qaggint_params_input { __le16 target_id; __le64 resp_addr; __le16 ring_id; - u8 unused_0[6]; + __le16 flags; + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0 + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL + u8 unused_0[4]; }; /* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */ @@ -7618,7 +7754,9 @@ struct hwrm_nvm_modify_input { __le64 resp_addr; __le64 host_src_addr; __le16 dir_idx; - u8 unused_0[2]; + __le16 flags; + #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL + #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL __le32 offset; __le32 len; u8 unused_1[4]; @@ -8027,4 +8165,18 @@ struct 
hwrm_selftest_irq_output { u8 valid; }; +/* fw_status_reg (size:32b/4B) */ +struct fw_status_reg { + u32 fw_status; + #define FW_STATUS_REG_CODE_MASK 0xffffUL + #define FW_STATUS_REG_CODE_SFT 0 + #define FW_STATUS_REG_CODE_READY 0x8000UL + #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY + #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL + #define FW_STATUS_REG_RECOVERABLE 0x20000UL + #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL + #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL + #define FW_STATUS_REG_SHUTDOWN 0x100000UL +}; + #endif /* _BNXT_HSI_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 6ea3df6da18c..c883e8884faf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -651,7 +651,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) FUNC_CFG_REQ_ENABLES_NUM_VNICS | FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); - mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; req.mru = cpu_to_le16(mtu); req.mtu = cpu_to_le16(mtu); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 4a316c4b3fa8..8c8368c2f335 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -104,7 +104,13 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent) for (i = 0; i < num_msix; i++) { ent[i].vector = bp->irq_tbl[idx + i].vector; ent[i].ring_idx = idx + i; - ent[i].db_offset = (idx + i) * 0x80; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + ent[i].db_offset = DB_PF_OFFSET_P5; + if (BNXT_VF(bp)) + ent[i].db_offset = DB_VF_OFFSET_P5; + } else { + ent[i].db_offset = (idx + i) * 0x80; + } } } @@ -475,6 +481,8 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev) edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; edev->net = dev; edev->pdev = bp->pdev; + edev->l2_db_size = bp->db_size; + edev->l2_db_size_nc = bp->db_size; bp->edev = edev; } return bp->edev; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index 9895406b9830..6b4d2556a6df 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -67,6 +67,14 @@ struct bnxt_en_dev { #define BNXT_EN_FLAG_ULP_STOPPED 0x8 const struct bnxt_en_ops *en_ops; struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP]; + int l2_db_size; /* Doorbell BAR size in + * bytes mapped by L2 + * driver. + */ + int l2_db_size_nc; /* Doorbell BAR size in + * bytes mapped as non- + * cacheable. + */ }; struct bnxt_en_ops { |