 drivers/net/ethernet/marvell/octeontx2/af/cgx.c       | 108
 drivers/net/ethernet/marvell/octeontx2/af/cgx.h       |  34
 drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h |   2
 drivers/net/ethernet/marvell/octeontx2/af/rvu.c       |  26
 drivers/net/ethernet/marvell/octeontx2/af/rvu.h       |   6
 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c   |  97
 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c   |   6
 7 files changed, 214 insertions(+), 65 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 12db256c8c9f..4c94571e03eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -31,6 +31,7 @@
  * @resp:		command response
  * @link_info:		link related information
  * @event_cb:		callback for linkchange events
+ * @event_cb_lock:	lock for serializing callback with unregister
  * @cmd_pend:		flag set before new command is started
  *			flag cleared after command response is received
  * @cgx:		parent cgx port
@@ -43,6 +44,7 @@ struct lmac {
 	u64 resp;
 	struct cgx_link_user_info link_info;
 	struct cgx_event_cb event_cb;
+	spinlock_t event_cb_lock;
 	bool cmd_pend;
 	struct cgx *cgx;
 	u8 lmac_id;
@@ -55,6 +57,8 @@ struct cgx {
 	u8 cgx_id;
 	u8 lmac_count;
 	struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+	struct work_struct cgx_cmd_work;
+	struct workqueue_struct *cgx_cmd_workq;
 	struct list_head cgx_list;
 };
 
@@ -66,6 +70,9 @@ static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
 /* Convert firmware lmac type encoding to string */
 static char *cgx_lmactype_string[LMAC_MODE_MAX];
 
+/* CGX PHY management internal APIs */
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
+
 /* Supported devices */
 static const struct pci_device_id cgx_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
@@ -92,17 +99,21 @@ static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
 	return cgx->lmac_idmap[lmac_id];
 }
 
-int cgx_get_cgx_cnt(void)
+int cgx_get_cgxcnt_max(void)
 {
 	struct cgx *cgx_dev;
-	int count = 0;
+	int idmax = -ENODEV;
 
 	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
-		count++;
+		if (cgx_dev->cgx_id > idmax)
+			idmax = cgx_dev->cgx_id;
+
+	if (idmax < 0)
+		return 0;
 
-	return count;
+	return idmax + 1;
 }
-EXPORT_SYMBOL(cgx_get_cgx_cnt);
+EXPORT_SYMBOL(cgx_get_cgxcnt_max);
 
 int cgx_get_lmac_cnt(void *cgxd)
 {
@@ -445,6 +456,9 @@ static inline void cgx_link_change_handler(u64 lstat,
 	lmac->link_info = event.link_uinfo;
 	linfo = &lmac->link_info;
 
+	/* Ensure callback doesn't get unregistered until we finish it */
+	spin_lock(&lmac->event_cb_lock);
+
 	if (!lmac->event_cb.notify_link_chg) {
 		dev_dbg(dev, "cgx port %d:%d Link change handler null",
 			cgx->cgx_id, lmac->lmac_id);
@@ -455,11 +469,13 @@ static inline void cgx_link_change_handler(u64 lstat,
 		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
 			 cgx->cgx_id, lmac->lmac_id,
 			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
-		return;
+		goto err;
 	}
 
 	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
 		dev_err(dev, "event notification failure\n");
+err:
+	spin_unlock(&lmac->event_cb_lock);
 }
 
 static inline bool cgx_cmdresp_is_linkevent(u64 event)
@@ -548,6 +564,38 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
 }
 EXPORT_SYMBOL(cgx_lmac_evh_register);
 
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
+{
+	struct lmac *lmac;
+	unsigned long flags;
+	struct cgx *cgx = cgxd;
+
+	lmac = lmac_pdata(lmac_id, cgx);
+	if (!lmac)
+		return -ENODEV;
+
+	spin_lock_irqsave(&lmac->event_cb_lock, flags);
+	lmac->event_cb.notify_link_chg = NULL;
+	lmac->event_cb.data = NULL;
+	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_unregister);
+
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
+{
+	u64 req = 0;
+	u64 resp;
+
+	if (enable)
+		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
+	else
+		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+
+	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
 static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
 {
 	u64 req = 0;
@@ -581,6 +629,34 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
 	return 0;
 }
 
+static void cgx_lmac_linkup_work(struct work_struct *work)
+{
+	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
+	struct device *dev = &cgx->pdev->dev;
+	int i, err;
+
+	/* Do Link up for all the lmacs */
+	for (i = 0; i < cgx->lmac_count; i++) {
+		err = cgx_fwi_link_change(cgx, i, true);
+		if (err)
+			dev_info(dev, "cgx port %d:%d Link up command failed\n",
+				 cgx->cgx_id, i);
+	}
+}
+
+int cgx_lmac_linkup_start(void *cgxd)
+{
+	struct cgx *cgx = cgxd;
+
+	if (!cgx)
+		return -ENODEV;
+
+	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
+
+	return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_linkup_start);
+
 static int cgx_lmac_init(struct cgx *cgx)
 {
 	struct lmac *lmac;
@@ -602,6 +678,7 @@ static int cgx_lmac_init(struct cgx *cgx)
 		lmac->cgx = cgx;
 		init_waitqueue_head(&lmac->wq_cmd_cmplt);
 		mutex_init(&lmac->cmd_lock);
+		spin_lock_init(&lmac->event_cb_lock);
 		err = request_irq(pci_irq_vector(cgx->pdev,
 						 CGX_LMAC_FWI + i * 9),
 				  cgx_fwi_event_handler, 0, lmac->name, lmac);
@@ -624,6 +701,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
 	struct lmac *lmac;
 	int i;
 
+	if (cgx->cgx_cmd_workq) {
+		flush_workqueue(cgx->cgx_cmd_workq);
+		destroy_workqueue(cgx->cgx_cmd_workq);
+		cgx->cgx_cmd_workq = NULL;
+	}
+
 	/* Free all lmac related resources */
 	for (i = 0; i < cgx->lmac_count; i++) {
 		lmac = cgx->lmac_idmap[i];
@@ -679,8 +762,19 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_release_regions;
 	}
 
+	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+			& CGX_ID_MASK;
+
+	/* init wq for processing linkup requests */
+	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
+	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+	if (!cgx->cgx_cmd_workq) {
+		dev_err(dev, "alloc workqueue failed for cgx cmd");
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
 	list_add(&cgx->cgx_list, &cgx_list);
-	cgx->cgx_id = cgx_get_cgx_cnt() - 1;
 
 	cgx_link_usertable_init();
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 3bd38ed6d68b..8c2be8493321 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -20,41 +20,41 @@
 /* PCI BAR nos */
 #define PCI_CFG_REG_BAR_NUM		0
 
-#define MAX_CGX				3
+#define CGX_ID_MASK			0x7
 #define MAX_LMAC_PER_CGX		4
 #define CGX_FIFO_LEN			65536 /* 64K for both Rx & Tx */
 #define CGX_OFFSET(x)			((x) * MAX_LMAC_PER_CGX)
 
 /* Registers */
 #define CGXX_CMRX_CFG			0x00
-#define CMR_EN				BIT_ULL(55)
-#define DATA_PKT_TX_EN			BIT_ULL(53)
-#define DATA_PKT_RX_EN			BIT_ULL(54)
-#define CGX_LMAC_TYPE_SHIFT		40
-#define CGX_LMAC_TYPE_MASK		0xF
+#define  CMR_EN				BIT_ULL(55)
+#define  DATA_PKT_TX_EN			BIT_ULL(53)
+#define  DATA_PKT_RX_EN			BIT_ULL(54)
+#define  CGX_LMAC_TYPE_SHIFT		40
+#define  CGX_LMAC_TYPE_MASK		0xF
 #define CGXX_CMRX_INT			0x040
-#define FW_CGX_INT			BIT_ULL(1)
+#define  FW_CGX_INT			BIT_ULL(1)
 #define CGXX_CMRX_INT_ENA_W1S		0x058
 #define CGXX_CMRX_RX_ID_MAP		0x060
 #define CGXX_CMRX_RX_STAT0		0x070
 #define CGXX_CMRX_RX_LMACS		0x128
 #define CGXX_CMRX_RX_DMAC_CTL0		0x1F8
-#define CGX_DMAC_CTL0_CAM_ENABLE	BIT_ULL(3)
-#define CGX_DMAC_CAM_ACCEPT		BIT_ULL(3)
-#define CGX_DMAC_MCAST_MODE		BIT_ULL(1)
-#define CGX_DMAC_BCAST_MODE		BIT_ULL(0)
+#define  CGX_DMAC_CTL0_CAM_ENABLE	BIT_ULL(3)
+#define  CGX_DMAC_CAM_ACCEPT		BIT_ULL(3)
+#define  CGX_DMAC_MCAST_MODE		BIT_ULL(1)
+#define  CGX_DMAC_BCAST_MODE		BIT_ULL(0)
 #define CGXX_CMRX_RX_DMAC_CAM0		0x200
-#define CGX_DMAC_CAM_ADDR_ENABLE	BIT_ULL(48)
+#define  CGX_DMAC_CAM_ADDR_ENABLE	BIT_ULL(48)
 #define CGXX_CMRX_RX_DMAC_CAM1		0x400
-#define CGX_RX_DMAC_ADR_MASK		GENMASK_ULL(47, 0)
+#define  CGX_RX_DMAC_ADR_MASK		GENMASK_ULL(47, 0)
 #define CGXX_CMRX_TX_STAT0		0x700
 #define CGXX_SCRATCH0_REG		0x1050
 #define CGXX_SCRATCH1_REG		0x1058
 #define CGX_CONST			0x2000
 #define CGXX_SPUX_CONTROL1		0x10000
-#define CGXX_SPUX_CONTROL1_LBK		BIT_ULL(14)
+#define  CGXX_SPUX_CONTROL1_LBK		BIT_ULL(14)
 #define CGXX_GMP_PCS_MRX_CTL		0x30000
-#define CGXX_GMP_PCS_MRX_CTL_LBK	BIT_ULL(14)
+#define  CGXX_GMP_PCS_MRX_CTL_LBK	BIT_ULL(14)
 
 #define CGX_COMMAND_REG			CGXX_SCRATCH1_REG
 #define CGX_EVENT_REG			CGXX_SCRATCH0_REG
@@ -95,11 +95,12 @@ struct cgx_event_cb {
 
 extern struct pci_driver cgx_driver;
 
-int cgx_get_cgx_cnt(void);
+int cgx_get_cgxcnt_max(void);
 int cgx_get_lmac_cnt(void *cgxd);
 void *cgx_get_pdata(int cgx_id);
 int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
 int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
 int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
 int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
 int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
@@ -109,4 +110,5 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
 int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
 int cgx_get_link_info(void *cgxd, int lmac_id,
 		      struct cgx_link_user_info *linfo);
+int cgx_lmac_linkup_start(void *cgxd);
 #endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index fa17af3f4ba7..2d9fe51c6616 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -78,8 +78,6 @@ enum cgx_cmd_id {
 	CGX_CMD_LINK_STATE_CHANGE,
 	CGX_CMD_MODE_CHANGE,		/* hot plug support */
 	CGX_CMD_INTF_SHUTDOWN,
-	CGX_CMD_IRQ_ENABLE,
-	CGX_CMD_IRQ_DISABLE,
 };
 
 /* async event ids */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 40eb3ad725f5..4d061d971956 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -811,17 +811,26 @@ init:
 
 	err = rvu_npc_init(rvu);
 	if (err)
-		return err;
+		goto exit;
+
+	err = rvu_cgx_init(rvu);
+	if (err)
+		goto exit;
 
 	err = rvu_npa_init(rvu);
 	if (err)
-		return err;
+		goto cgx_err;
 
 	err = rvu_nix_init(rvu);
 	if (err)
-		return err;
+		goto cgx_err;
 
 	return 0;
+
+cgx_err:
+	rvu_cgx_exit(rvu);
+exit:
+	return err;
 }
 
 /* NPA and NIX admin queue APIs */
@@ -2419,13 +2428,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_hwsetup;
 
-	err = rvu_cgx_probe(rvu);
-	if (err)
-		goto err_mbox;
-
 	err = rvu_flr_init(rvu);
 	if (err)
-		goto err_cgx;
+		goto err_mbox;
 
 	err = rvu_register_interrupts(rvu);
 	if (err)
@@ -2441,11 +2446,10 @@ err_irq:
 	rvu_unregister_interrupts(rvu);
 err_flr:
 	rvu_flr_wq_destroy(rvu);
-err_cgx:
-	rvu_cgx_wq_destroy(rvu);
 err_mbox:
 	rvu_mbox_destroy(&rvu->afpf_wq_info);
 err_hwsetup:
+	rvu_cgx_exit(rvu);
 	rvu_reset_all_blocks(rvu);
 	rvu_free_hw_resources(rvu);
 err_release_regions:
@@ -2465,7 +2469,7 @@ static void rvu_remove(struct pci_dev *pdev)
 
 	rvu_unregister_interrupts(rvu);
 	rvu_flr_wq_destroy(rvu);
-	rvu_cgx_wq_destroy(rvu);
+	rvu_cgx_exit(rvu);
 	rvu_mbox_destroy(&rvu->afpf_wq_info);
 	rvu_disable_sriov(rvu);
 	rvu_reset_all_blocks(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index c8409bc5d9c3..ae8e2e206c87 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -226,7 +226,7 @@ struct rvu {
 	/* CGX */
 #define PF_CGXMAP_BASE		1 /* PF 0 is reserved for RVU PF */
 	u8 cgx_mapped_pfs;
-	u8 cgx_cnt; /* available cgx ports */
+	u8 cgx_cnt_max;	 /* CGX port count max */
 	u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
 	u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
 			      * every cgx lmac port
@@ -316,8 +316,8 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
 	*lmac_id = (map & 0xF);
 }
 
-int rvu_cgx_probe(struct rvu *rvu);
-void rvu_cgx_wq_destroy(struct rvu *rvu);
+int rvu_cgx_init(struct rvu *rvu);
+int rvu_cgx_exit(struct rvu *rvu);
 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
 int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 1de6eb528d08..7d7133c5f799 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -52,7 +52,7 @@ static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
 
 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
 {
-	if (cgx_id >= rvu->cgx_cnt)
+	if (cgx_id >= rvu->cgx_cnt_max)
 		return NULL;
 
 	return rvu->cgx_idmap[cgx_id];
@@ -61,38 +61,40 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
 static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
-	int cgx_cnt = rvu->cgx_cnt;
+	int cgx_cnt_max = rvu->cgx_cnt_max;
 	int cgx, lmac_cnt, lmac;
 	int pf = PF_CGXMAP_BASE;
 	int size, free_pkind;
 
-	if (!cgx_cnt)
+	if (!cgx_cnt_max)
 		return 0;
 
-	if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
 		return -EINVAL;
 
 	/* Alloc map table
 	 * An additional entry is required since PF id starts from 1 and
 	 * hence entry at offset 0 is invalid.
 	 */
-	size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
-	rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
 	if (!rvu->pf2cgxlmac_map)
 		return -ENOMEM;
 
-	/* Initialize offset 0 with an invalid cgx and lmac id */
-	rvu->pf2cgxlmac_map[0] = 0xFF;
+	/* Initialize all entries with an invalid cgx and lmac id */
+	memset(rvu->pf2cgxlmac_map, 0xFF, size);
 
 	/* Reverse map table */
 	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
-				  cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
 				  GFP_KERNEL);
 	if (!rvu->cgxlmac2pf_map)
 		return -ENOMEM;
 
 	rvu->cgx_mapped_pfs = 0;
-	for (cgx = 0; cgx < cgx_cnt; cgx++) {
+	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+		if (!rvu_cgx_pdata(cgx, rvu))
+			continue;
 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
 		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -216,7 +218,7 @@ static void cgx_evhandler_task(struct work_struct *work)
 	} while (1);
 }
 
-static void cgx_lmac_event_handler_init(struct rvu *rvu)
+static int cgx_lmac_event_handler_init(struct rvu *rvu)
 {
 	struct cgx_event_cb cb;
 	int cgx, lmac, err;
@@ -228,14 +230,16 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
 	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
 	if (!rvu->cgx_evh_wq) {
 		dev_err(rvu->dev, "alloc workqueue failed");
-		return;
+		return -ENOMEM;
 	}
 
 	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
 	cb.data = rvu;
 
-	for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
 		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
 		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
 			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
 			if (err)
@@ -244,9 +248,11 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
 				cgx, lmac);
 		}
 	}
+
+	return 0;
 }
 
-void rvu_cgx_wq_destroy(struct rvu *rvu)
+static void rvu_cgx_wq_destroy(struct rvu *rvu)
 {
 	if (rvu->cgx_evh_wq) {
 		flush_workqueue(rvu->cgx_evh_wq);
@@ -255,25 +261,28 @@ void rvu_cgx_wq_destroy(struct rvu *rvu)
 	}
 }
 
-int rvu_cgx_probe(struct rvu *rvu)
+int rvu_cgx_init(struct rvu *rvu)
 {
-	int i, err;
+	int cgx, err;
+	void *cgxd;
 
-	/* find available cgx ports */
-	rvu->cgx_cnt = cgx_get_cgx_cnt();
-	if (!rvu->cgx_cnt) {
+	/* CGX port id starts from 0 and are not necessarily contiguous
+	 * Hence we allocate resources based on the maximum port id value.
+	 */
+	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+	if (!rvu->cgx_cnt_max) {
 		dev_info(rvu->dev, "No CGX devices found!\n");
 		return -ENODEV;
 	}
 
-	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
-				      GFP_KERNEL);
+	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+				      sizeof(void *), GFP_KERNEL);
 	if (!rvu->cgx_idmap)
 		return -ENOMEM;
 
 	/* Initialize the cgxdata table */
-	for (i = 0; i < rvu->cgx_cnt; i++)
-		rvu->cgx_idmap[i] = cgx_get_pdata(i);
+	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
+		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
 
 	/* Map CGX LMAC interfaces to RVU PFs */
 	err = rvu_map_cgx_lmac_pf(rvu);
@@ -281,7 +290,47 @@ int rvu_cgx_probe(struct rvu *rvu)
 		return err;
 
 	/* Register for CGX events */
-	cgx_lmac_event_handler_init(rvu);
+	err = cgx_lmac_event_handler_init(rvu);
+	if (err)
+		return err;
+
+	/* Ensure event handler registration is completed, before
+	 * we turn on the links
+	 */
+	mb();
+
+	/* Do link up for all CGX ports */
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
+		err = cgx_lmac_linkup_start(cgxd);
+		if (err)
+			dev_err(rvu->dev,
+				"Link up process failed to start on cgx %d\n",
+				cgx);
+	}
+
+	return 0;
+}
+
+int rvu_cgx_exit(struct rvu *rvu)
+{
+	int cgx, lmac;
+	void *cgxd;
+
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+		cgxd = rvu_cgx_pdata(cgx, rvu);
+		if (!cgxd)
+			continue;
+		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+			cgx_lmac_evh_unregister(cgxd, lmac);
+	}
+
+	/* Ensure event handler unregister is completed */
+	mb();
+
+	rvu_cgx_wq_destroy(rvu);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index b7998f6be386..962a82f7d141 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2107,8 +2107,10 @@ static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
 
 	/* Check if CGX devices are ready */
-	for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
-		if (status & (BIT_ULL(16 + idx)))
+	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
+		/* Skip when cgx port is not available */
+		if (!rvu_cgx_pdata(idx, rvu) ||
+		    (status & (BIT_ULL(16 + idx))))
 			continue;
 		dev_err(rvu->dev,
 			"CGX%d didn't respond to NIX X2P calibration\n", idx);