author     David S. Miller <davem@davemloft.net>   2018-04-30 09:26:29 -0400
committer  David S. Miller <davem@davemloft.net>   2018-04-30 09:26:29 -0400
commit     dbafb0c488ee02893764c14783e26574ee8f1a96 (patch)
tree       09243248f41f1d2236d4e7fc1d428361bf374db8 /drivers
parent     3ac305c386f698abccd0523c64a8aef248c89bc6 (diff)
parent     c33c997346c34ea7b89aec99524ad9632a2f1e0c (diff)
Merge branch 'liquidio-enhanced-ethtool-set-channels-feature'
Intiyaz Basha says:
====================
liquidio: enhanced ethtool --set-channels feature
For the ethtool --set-channels feature, the liquidio driver currently accepts a max
combined value equal to the queue count configured at driver load time, where the max
combined count is the total count of input and output queues. This limitation applies
only when SR-IOV is enabled, that is, when VFs are created for the PF. If SR-IOV is not
enabled, the driver can configure the max supported (64) queues.
This series of patches enhances the driver to accept the max supported queue count
for ethtool --set-channels.
Changes in V2:
Only patch #6 was changed to fix these Sparse warnings reported by the kbuild test robot:
lio_ethtool.c:848:5: warning: symbol 'lio_23xx_reconfigure_queue_count' was not declared. Should it be static?
lio_ethtool.c:877:22: warning: incorrect type in assignment (different base types)
lio_ethtool.c:878:22: warning: incorrect type in assignment (different base types)
lio_ethtool.c:879:22: warning: incorrect type in assignment (different base types)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
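For readers skimming the series, the selection rule described in the cover letter can be
summarized with a minimal, self-contained C sketch. It is illustrative only: example_dev,
example_max_combined and MAX_SUPPORTED_QUEUES are simplified stand-ins, not driver symbols;
the real logic lives in lio_ethtool_get_channels()/lio_ethtool_set_channels() in the diff below.

#include <stdio.h>

#define MAX_SUPPORTED_QUEUES 64	/* queue limit when SR-IOV is disabled */

struct example_dev {
	int sriov_enabled;	/* VFs have been created for this PF */
	int num_txpciq;		/* queue count negotiated at driver load time */
};

static int example_max_combined(const struct example_dev *dev)
{
	/* With SR-IOV enabled the PF stays at its load-time queue count;
	 * otherwise the full supported count can be offered to ethtool.
	 */
	return dev->sriov_enabled ? dev->num_txpciq : MAX_SUPPORTED_QUEUES;
}

int main(void)
{
	struct example_dev sriov_on  = { .sriov_enabled = 1, .num_txpciq = 8 };
	struct example_dev sriov_off = { .sriov_enabled = 0, .num_txpciq = 8 };

	printf("SR-IOV enabled:  max combined = %d\n", example_max_combined(&sriov_on));
	printf("SR-IOV disabled: max combined = %d\n", example_max_combined(&sriov_off));
	return 0;
}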
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 6
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h | 2
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/lio_core.c | 158
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 263
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/lio_main.c | 270
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 206
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 1
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/octeon_device.c | 12
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/octeon_device.h | 2
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/octeon_network.h | 60
10 files changed, 528 insertions, 452 deletions
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index bc9861c90ea3..929d485a3a2f 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1245,7 +1245,7 @@ static void cn23xx_setup_reg_address(struct octeon_device *oct) CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); } -static int cn23xx_sriov_config(struct octeon_device *oct) +int cn23xx_sriov_config(struct octeon_device *oct) { struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; u32 max_rings, total_rings, max_vfs, rings_per_vf; @@ -1269,8 +1269,8 @@ static int cn23xx_sriov_config(struct octeon_device *oct) break; } - if (max_rings <= num_present_cpus()) - num_pf_rings = 1; + if (oct->sriov_info.num_pf_rings) + num_pf_rings = oct->sriov_info.num_pf_rings; else num_pf_rings = num_present_cpus(); diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h index 63b3de4f2bfe..e6f31d0d5c0b 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h @@ -61,6 +61,8 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct); +int cn23xx_sriov_config(struct octeon_device *oct); + int cn23xx_fw_loaded(struct octeon_device *oct); void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 844e288d60fe..6821afcdc365 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -29,6 +29,162 @@ /* OOM task polling interval */ #define LIO_OOM_POLL_INTERVAL_MS 250 +#define OCTNIC_MAX_SG MAX_SKB_FRAGS + +/** + * \brief Callback for getting interface configuration + * @param status status of request + * @param buf pointer to resp structure + */ +void lio_if_cfg_callback(struct octeon_device *oct, + u32 status __attribute__((unused)), void *buf) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; + struct liquidio_if_cfg_context *ctx; + struct liquidio_if_cfg_resp *resp; + + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; + + oct = lio_get_device(ctx->octeon_id); + if (resp->status) + dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. 
Status: %llx\n", + CVM_CAST64(resp->status)); + WRITE_ONCE(ctx->cond, 1); + + snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", + resp->cfg_info.liquidio_firmware_version); + + /* This barrier is required to be sure that the response has been + * written fully before waking up the handler + */ + wmb(); + + wake_up_interruptible(&ctx->wc); +} + +/** + * \brief Delete gather lists + * @param lio per-network private data + */ +void lio_delete_glists(struct lio *lio) +{ + struct octnic_gather *g; + int i; + + kfree(lio->glist_lock); + lio->glist_lock = NULL; + + if (!lio->glist) + return; + + for (i = 0; i < lio->oct_dev->num_iqs; i++) { + do { + g = (struct octnic_gather *) + lio_list_delete_head(&lio->glist[i]); + kfree(g); + } while (g); + + if (lio->glists_virt_base && lio->glists_virt_base[i] && + lio->glists_dma_base && lio->glists_dma_base[i]) { + lio_dma_free(lio->oct_dev, + lio->glist_entry_size * lio->tx_qsize, + lio->glists_virt_base[i], + lio->glists_dma_base[i]); + } + } + + kfree(lio->glists_virt_base); + lio->glists_virt_base = NULL; + + kfree(lio->glists_dma_base); + lio->glists_dma_base = NULL; + + kfree(lio->glist); + lio->glist = NULL; +} + +/** + * \brief Setup gather lists + * @param lio per-network private data + */ +int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) +{ + struct octnic_gather *g; + int i, j; + + lio->glist_lock = + kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL); + if (!lio->glist_lock) + return -ENOMEM; + + lio->glist = + kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL); + if (!lio->glist) { + kfree(lio->glist_lock); + lio->glist_lock = NULL; + return -ENOMEM; + } + + lio->glist_entry_size = + ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); + + /* allocate memory to store virtual and dma base address of + * per glist consistent memory + */ + lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), + GFP_KERNEL); + lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), + GFP_KERNEL); + + if (!lio->glists_virt_base || !lio->glists_dma_base) { + lio_delete_glists(lio); + return -ENOMEM; + } + + for (i = 0; i < num_iqs; i++) { + int numa_node = dev_to_node(&oct->pci_dev->dev); + + spin_lock_init(&lio->glist_lock[i]); + + INIT_LIST_HEAD(&lio->glist[i]); + + lio->glists_virt_base[i] = + lio_dma_alloc(oct, + lio->glist_entry_size * lio->tx_qsize, + &lio->glists_dma_base[i]); + + if (!lio->glists_virt_base[i]) { + lio_delete_glists(lio); + return -ENOMEM; + } + + for (j = 0; j < lio->tx_qsize; j++) { + g = kzalloc_node(sizeof(*g), GFP_KERNEL, + numa_node); + if (!g) + g = kzalloc(sizeof(*g), GFP_KERNEL); + if (!g) + break; + + g->sg = lio->glists_virt_base[i] + + (j * lio->glist_entry_size); + + g->sg_dma_ptr = lio->glists_dma_base[i] + + (j * lio->glist_entry_size); + + list_add_tail(&g->list, &lio->glist[i]); + } + + if (j != lio->tx_qsize) { + lio_delete_glists(lio); + return -ENOMEM; + } + } + + return 0; +} + int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) { struct lio *lio = GET_LIO(netdev); @@ -880,8 +1036,8 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs) int num_ioq_vectors; int irqret, err; - oct->num_msix_irqs = num_ioqs; if (oct->msix_on) { + oct->num_msix_irqs = num_ioqs; if (OCTEON_CN23XX_PF(oct)) { num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 64c817a63a01..fff60ca20c35 100644 --- 
a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -361,7 +361,14 @@ lio_ethtool_get_channels(struct net_device *dev, rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); } else if (OCTEON_CN23XX_PF(oct)) { - max_combined = lio->linfo.num_txpciq; + if (oct->sriov_info.sriov_enabled) { + max_combined = lio->linfo.num_txpciq; + } else { + struct octeon_config *conf23_pf = + CHIP_CONF(oct, cn23xx_pf); + + max_combined = CFG_GET_IQ_MAX_Q(conf23_pf); + } combined_count = oct->num_iqs; } else if (OCTEON_CN23XX_VF(oct)) { u64 reg_val = 0ULL; @@ -425,9 +432,15 @@ lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) kfree(oct->irq_name_storage); oct->irq_name_storage = NULL; + + if (octeon_allocate_ioq_vector(oct, num_ioqs)) { + dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); + return -1; + } + if (octeon_setup_interrupt(oct, num_ioqs)) { dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n"); - return 1; + return -1; } /* Enable Octeon device interrupts */ @@ -457,7 +470,16 @@ lio_ethtool_set_channels(struct net_device *dev, combined_count = channel->combined_count; if (OCTEON_CN23XX_PF(oct)) { - max_combined = channel->max_combined; + if (oct->sriov_info.sriov_enabled) { + max_combined = lio->linfo.num_txpciq; + } else { + struct octeon_config *conf23_pf = + CHIP_CONF(oct, + cn23xx_pf); + + max_combined = + CFG_GET_IQ_MAX_Q(conf23_pf); + } } else if (OCTEON_CN23XX_VF(oct)) { u64 reg_val = 0ULL; u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); @@ -485,7 +507,6 @@ lio_ethtool_set_channels(struct net_device *dev, if (lio_reset_queues(dev, combined_count)) return -EINVAL; - lio_irq_reallocate_irqs(oct, combined_count); if (stopped) dev->netdev_ops->ndo_open(dev); @@ -824,12 +845,120 @@ lio_ethtool_get_ringparam(struct net_device *netdev, ering->rx_jumbo_max_pending = 0; } +static int lio_23xx_reconfigure_queue_count(struct lio *lio) +{ + struct octeon_device *oct = lio->oct_dev; + struct liquidio_if_cfg_context *ctx; + u32 resp_size, ctx_size, data_size; + struct liquidio_if_cfg_resp *resp; + struct octeon_soft_command *sc; + union oct_nic_if_cfg if_cfg; + struct lio_version *vdata; + u32 ifidx_or_pfnum; + int retval; + int j; + + resp_size = sizeof(struct liquidio_if_cfg_resp); + ctx_size = sizeof(struct liquidio_if_cfg_context); + data_size = sizeof(struct lio_version); + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, data_size, + resp_size, ctx_size); + if (!sc) { + dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n", + __func__); + return -1; + } + + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; + vdata = (struct lio_version *)sc->virtdptr; + + vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); + vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); + vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); + + ifidx_or_pfnum = oct->pf_num; + WRITE_ONCE(ctx->cond, 0); + ctx->octeon_id = lio_get_device_id(oct); + init_waitqueue_head(&ctx->wc); + + if_cfg.u64 = 0; + if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings; + if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings; + if_cfg.s.base_queue = oct->sriov_info.pf_srn; + if_cfg.s.gmx_port_id = oct->pf_num; + + sc->iq_no = 0; + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_QCOUNT_UPDATE, 0, + if_cfg.u64, 0); + sc->callback = 
lio_if_cfg_callback; + sc->callback_arg = sc; + sc->wait_time = LIO_IFCFG_WAIT_TIME; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_err(&oct->pci_dev->dev, + "iq/oq config failed status: %x\n", + retval); + goto qcount_update_fail; + } + + if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { + dev_err(&oct->pci_dev->dev, "Wait interrupted\n"); + return -1; + } + + retval = resp->status; + if (retval) { + dev_err(&oct->pci_dev->dev, "iq/oq config failed\n"); + goto qcount_update_fail; + } + + octeon_swap_8B_data((u64 *)(&resp->cfg_info), + (sizeof(struct liquidio_if_cfg_info)) >> 3); + + lio->ifidx = ifidx_or_pfnum; + lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask); + lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask); + for (j = 0; j < lio->linfo.num_rxpciq; j++) { + lio->linfo.rxpciq[j].u64 = + resp->cfg_info.linfo.rxpciq[j].u64; + } + + for (j = 0; j < lio->linfo.num_txpciq; j++) { + lio->linfo.txpciq[j].u64 = + resp->cfg_info.linfo.txpciq[j].u64; + } + + lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; + lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; + lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; + lio->txq = lio->linfo.txpciq[0].s.q_no; + lio->rxq = lio->linfo.rxpciq[0].s.q_no; + + octeon_free_soft_command(oct, sc); + dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n", + lio->linfo.num_rxpciq); + + return 0; + +qcount_update_fail: + octeon_free_soft_command(oct, sc); + + return -1; +} + static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + int i, queue_count_update = 0; struct napi_struct *napi, *n; - int i, update = 0; + int ret; + + schedule_timeout_uninterruptible(msecs_to_jiffies(100)); if (wait_for_pending_requests(oct)) dev_err(&oct->pci_dev->dev, "There were pending requests\n"); @@ -838,7 +967,7 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); if (octeon_set_io_queues_off(oct)) { - dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); + dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n"); return -1; } @@ -851,9 +980,40 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) netif_napi_del(napi); if (num_qs != oct->num_iqs) { - netif_set_real_num_rx_queues(netdev, num_qs); - netif_set_real_num_tx_queues(netdev, num_qs); - update = 1; + ret = netif_set_real_num_rx_queues(netdev, num_qs); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Setting real number rx failed\n"); + return ret; + } + + ret = netif_set_real_num_tx_queues(netdev, num_qs); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Setting real number tx failed\n"); + return ret; + } + + /* The value of queue_count_update decides whether it is the + * queue count or the descriptor count that is being + * re-configured. + */ + queue_count_update = 1; + } + + /* Re-configuration of queues can happen in two scenarios, SRIOV enabled + * and SRIOV disabled. Few things like recreating queue zero, resetting + * glists and IRQs are required for both. For the latter, some more + * steps like updating sriov_info for the octeon device need to be done. + */ + if (queue_count_update) { + lio_delete_glists(lio); + + /* Delete mbox for PF which is SRIOV disabled because sriov_info + * will be now changed. 
+ */ + if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled) + oct->fn_list.free_mbox(oct); } for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { @@ -868,24 +1028,91 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) octeon_delete_instr_queue(oct, i); } + if (queue_count_update) { + /* For PF re-configure sriov related information */ + if ((OCTEON_CN23XX_PF(oct)) && + !oct->sriov_info.sriov_enabled) { + oct->sriov_info.num_pf_rings = num_qs; + if (cn23xx_sriov_config(oct)) { + dev_err(&oct->pci_dev->dev, + "Queue reset aborted: SRIOV config failed\n"); + return -1; + } + + num_qs = oct->sriov_info.num_pf_rings; + } + } + if (oct->fn_list.setup_device_regs(oct)) { dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n"); return -1; } - if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { - dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n"); - return -1; + /* The following are needed in case of queue count re-configuration and + * not for descriptor count re-configuration. + */ + if (queue_count_update) { + if (octeon_setup_instr_queues(oct)) + return -1; + + if (octeon_setup_output_queues(oct)) + return -1; + + /* Recreating mbox for PF that is SRIOV disabled */ + if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { + if (oct->fn_list.setup_mbox(oct)) { + dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); + return -1; + } + } + + /* Deleting and recreating IRQs whether the interface is SRIOV + * enabled or disabled. + */ + if (lio_irq_reallocate_irqs(oct, num_qs)) { + dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n"); + return -1; + } + + /* Enable the input and output queues for this Octeon device */ + if (oct->fn_list.enable_io_queues(oct)) { + dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n"); + return -1; + } + + for (i = 0; i < oct->num_oqs; i++) + writel(oct->droq[i]->max_count, + oct->droq[i]->pkts_credit_reg); + + /* Informing firmware about the new queue count. It is required + * for firmware to allocate more number of queues than those at + * load time. + */ + if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { + if (lio_23xx_reconfigure_queue_count(lio)) + return -1; + } } - /* Enable the input and output queues for this Octeon device */ - if (oct->fn_list.enable_io_queues(oct)) { - dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues"); + /* Once firmware is aware of the new value, queues can be recreated */ + if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { + dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n"); return -1; } - if (update && lio_send_queue_count_update(netdev, num_qs)) - return -1; + if (queue_count_update) { + if (lio_setup_glists(oct, lio, num_qs)) { + dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n"); + return -1; + } + + /* Send firmware the information about new number of queues + * if the interface is a VF or a PF that is SRIOV enabled. 
+ */ + if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct)) + if (lio_send_queue_count_update(netdev, num_qs)) + return -1; + } return 0; } @@ -930,7 +1157,7 @@ static int lio_ethtool_set_ringparam(struct net_device *netdev, CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, rx_count); - if (lio_reset_queues(netdev, lio->linfo.num_txpciq)) + if (lio_reset_queues(netdev, oct->num_iqs)) goto err_lio_reset_queues; if (stopped) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index fe3edf13c8f0..ee75048b3937 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -138,33 +138,10 @@ union tx_info { * by this structure in the NIC module. */ -#define OCTNIC_MAX_SG (MAX_SKB_FRAGS) - #define OCTNIC_GSO_MAX_HEADER_SIZE 128 #define OCTNIC_GSO_MAX_SIZE \ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) -/** Structure of a node in list of gather components maintained by - * NIC driver for each network device. - */ -struct octnic_gather { - /** List manipulation. Next and prev pointers. */ - struct list_head list; - - /** Size of the gather component at sg in bytes. */ - int sg_size; - - /** Number of bytes that sg was adjusted to make it 8B-aligned. */ - int adjust; - - /** Gather component that can accommodate max sized fragment list - * received from the IP layer. - */ - struct octeon_sg_entry *sg; - - dma_addr_t sg_dma_ptr; -}; - struct handshake { struct completion init; struct completion started; @@ -520,7 +497,7 @@ static void liquidio_deinit_pci(void) */ static inline int check_txq_status(struct lio *lio) { - int numqs = lio->netdev->num_tx_queues; + int numqs = lio->netdev->real_num_tx_queues; int ret_val = 0; int q, iq; @@ -542,148 +519,6 @@ static inline int check_txq_status(struct lio *lio) } /** - * Remove the node at the head of the list. The list would be empty at - * the end of this call if there are no more nodes in the list. 
- */ -static inline struct list_head *list_delete_head(struct list_head *root) -{ - struct list_head *node; - - if ((root->prev == root) && (root->next == root)) - node = NULL; - else - node = root->next; - - if (node) - list_del(node); - - return node; -} - -/** - * \brief Delete gather lists - * @param lio per-network private data - */ -static void delete_glists(struct lio *lio) -{ - struct octnic_gather *g; - int i; - - kfree(lio->glist_lock); - lio->glist_lock = NULL; - - if (!lio->glist) - return; - - for (i = 0; i < lio->linfo.num_txpciq; i++) { - do { - g = (struct octnic_gather *) - list_delete_head(&lio->glist[i]); - if (g) - kfree(g); - } while (g); - - if (lio->glists_virt_base && lio->glists_virt_base[i] && - lio->glists_dma_base && lio->glists_dma_base[i]) { - lio_dma_free(lio->oct_dev, - lio->glist_entry_size * lio->tx_qsize, - lio->glists_virt_base[i], - lio->glists_dma_base[i]); - } - } - - kfree(lio->glists_virt_base); - lio->glists_virt_base = NULL; - - kfree(lio->glists_dma_base); - lio->glists_dma_base = NULL; - - kfree(lio->glist); - lio->glist = NULL; -} - -/** - * \brief Setup gather lists - * @param lio per-network private data - */ -static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) -{ - int i, j; - struct octnic_gather *g; - - lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock), - GFP_KERNEL); - if (!lio->glist_lock) - return -ENOMEM; - - lio->glist = kcalloc(num_iqs, sizeof(*lio->glist), - GFP_KERNEL); - if (!lio->glist) { - kfree(lio->glist_lock); - lio->glist_lock = NULL; - return -ENOMEM; - } - - lio->glist_entry_size = - ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); - - /* allocate memory to store virtual and dma base address of - * per glist consistent memory - */ - lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), - GFP_KERNEL); - lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), - GFP_KERNEL); - - if (!lio->glists_virt_base || !lio->glists_dma_base) { - delete_glists(lio); - return -ENOMEM; - } - - for (i = 0; i < num_iqs; i++) { - int numa_node = dev_to_node(&oct->pci_dev->dev); - - spin_lock_init(&lio->glist_lock[i]); - - INIT_LIST_HEAD(&lio->glist[i]); - - lio->glists_virt_base[i] = - lio_dma_alloc(oct, - lio->glist_entry_size * lio->tx_qsize, - &lio->glists_dma_base[i]); - - if (!lio->glists_virt_base[i]) { - delete_glists(lio); - return -ENOMEM; - } - - for (j = 0; j < lio->tx_qsize; j++) { - g = kzalloc_node(sizeof(*g), GFP_KERNEL, - numa_node); - if (!g) - g = kzalloc(sizeof(*g), GFP_KERNEL); - if (!g) - break; - - g->sg = lio->glists_virt_base[i] + - (j * lio->glist_entry_size); - - g->sg_dma_ptr = lio->glists_dma_base[i] + - (j * lio->glist_entry_size); - - list_add_tail(&g->list, &lio->glist[i]); - } - - if (j != lio->tx_qsize) { - delete_glists(lio); - return -ENOMEM; - } - } - - return 0; -} - -/** * \brief Print link information * @param netdev network device */ @@ -1471,7 +1306,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) cleanup_rx_oom_poll_fn(netdev); - delete_glists(lio); + lio_delete_glists(lio); free_netdev(netdev); @@ -1686,7 +1521,7 @@ static void free_netsgbuf(void *buf) i++; } - iq = skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); spin_unlock(&lio->glist_lock[iq]); @@ -1729,7 +1564,7 @@ static void free_netsgbuf_with_resp(void *buf) i++; } - iq = skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); 
spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); @@ -1942,39 +1777,6 @@ static int load_firmware(struct octeon_device *oct) } /** - * \brief Callback for getting interface configuration - * @param status status of request - * @param buf pointer to resp structure - */ -static void if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), - void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_resp *resp; - struct liquidio_if_cfg_context *ctx; - - resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (resp->status) - dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n", - CVM_CAST64(resp->status), status); - WRITE_ONCE(ctx->cond, 1); - - snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", - resp->cfg_info.liquidio_firmware_version); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - -/** * \brief Poll routine for checking transmit queue status * @param work work_struct data structure */ @@ -2049,11 +1851,6 @@ static int liquidio_open(struct net_device *netdev) ifstate_set(lio, LIO_IFSTATE_RUNNING); - /* Ready for link status updates */ - lio->intf_open = 1; - - netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); - if (OCTEON_CN23XX_PF(oct)) { if (!oct->msix_on) if (setup_tx_poll_fn(netdev)) @@ -2063,7 +1860,12 @@ static int liquidio_open(struct net_device *netdev) return -1; } - start_txqs(netdev); + netif_tx_start_all_queues(netdev); + + /* Ready for link status updates */ + lio->intf_open = 1; + + netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); @@ -2086,11 +1888,15 @@ static int liquidio_stop(struct net_device *netdev) ifstate_reset(lio, LIO_IFSTATE_RUNNING); - netif_tx_disable(netdev); + /* Stop any link updates */ + lio->intf_open = 0; + + stop_txqs(netdev); /* Inform that netif carrier is down */ netif_carrier_off(netdev); - lio->intf_open = 0; + netif_tx_disable(netdev); + lio->linfo.link.s.link_up = 0; lio->link_changes++; @@ -2530,7 +2336,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) lio = GET_LIO(netdev); oct = lio->oct_dev; - q_idx = skb_iq(lio, skb); + q_idx = skb_iq(oct, skb); tag = q_idx; iq_no = lio->linfo.txpciq[q_idx].s.q_no; @@ -2623,7 +2429,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) spin_lock(&lio->glist_lock[q_idx]); g = (struct octnic_gather *) - list_delete_head(&lio->glist[q_idx]); + lio_list_delete_head(&lio->glist[q_idx]); spin_unlock(&lio->glist_lock[q_idx]); if (!g) { @@ -3496,6 +3302,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) struct liquidio_if_cfg_resp *resp; struct octdev_props *props; int retval, num_iqueues, num_oqueues; + int max_num_queues = 0; union oct_nic_if_cfg if_cfg; unsigned int base_queue; unsigned int gmx_port_id; @@ -3576,9 +3383,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = if_cfg_callback; + sc->callback = lio_if_cfg_callback; sc->callback_arg = sc; - sc->wait_time = 3000; + sc->wait_time = LIO_IFCFG_WAIT_TIME; retval = octeon_send_soft_command(octeon_dev, sc); if (retval == IQ_SEND_FAILED) { @@ 
-3632,11 +3439,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) resp->cfg_info.oqmask); goto setup_nic_dev_fail; } + + if (OCTEON_CN6XXX(octeon_dev)) { + max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, + cn6xxx)); + } else if (OCTEON_CN23XX_PF(octeon_dev)) { + max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, + cn23xx_pf)); + } + dev_dbg(&octeon_dev->pci_dev->dev, - "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", + "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n", i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, - num_iqueues, num_oqueues); - netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); + num_iqueues, num_oqueues, max_num_queues); + netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues); if (!netdev) { dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); @@ -3651,6 +3467,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->netdev_ops = &lionetdevops; SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops); + retval = netif_set_real_num_rx_queues(netdev, num_oqueues); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "setting real number rx failed\n"); + goto setup_nic_dev_fail; + } + + retval = netif_set_real_num_tx_queues(netdev, num_iqueues); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "setting real number tx failed\n"); + goto setup_nic_dev_fail; + } + lio = GET_LIO(netdev); memset(lio, 0, sizeof(struct lio)); @@ -3772,7 +3602,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); - if (setup_glists(octeon_dev, lio, num_iqueues)) { + if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); goto setup_nic_dev_fail; @@ -4271,7 +4101,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev) } atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE); - if (octeon_allocate_ioq_vector(octeon_dev)) { + if (octeon_allocate_ioq_vector + (octeon_dev, + octeon_dev->sriov_info.num_pf_rings)) { dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); return 1; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index b7b91d15ce3f..08b682b1c8ad 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -69,30 +69,10 @@ union tx_info { } s; }; -#define OCTNIC_MAX_SG (MAX_SKB_FRAGS) - #define OCTNIC_GSO_MAX_HEADER_SIZE 128 #define OCTNIC_GSO_MAX_SIZE \ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) -struct octnic_gather { - /* List manipulation. Next and prev pointers. */ - struct list_head list; - - /* Size of the gather component at sg in bytes. */ - int sg_size; - - /* Number of bytes that sg was adjusted to make it 8B-aligned. */ - int adjust; - - /* Gather component that can accommodate max sized fragment list - * received from the IP layer. - */ - struct octeon_sg_entry *sg; - - dma_addr_t sg_dma_ptr; -}; - static int liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void liquidio_vf_remove(struct pci_dev *pdev); @@ -285,142 +265,6 @@ static struct pci_driver liquidio_vf_pci_driver = { }; /** - * Remove the node at the head of the list. The list would be empty at - * the end of this call if there are no more nodes in the list. 
- */ -static struct list_head *list_delete_head(struct list_head *root) -{ - struct list_head *node; - - if ((root->prev == root) && (root->next == root)) - node = NULL; - else - node = root->next; - - if (node) - list_del(node); - - return node; -} - -/** - * \brief Delete gather lists - * @param lio per-network private data - */ -static void delete_glists(struct lio *lio) -{ - struct octnic_gather *g; - int i; - - kfree(lio->glist_lock); - lio->glist_lock = NULL; - - if (!lio->glist) - return; - - for (i = 0; i < lio->linfo.num_txpciq; i++) { - do { - g = (struct octnic_gather *) - list_delete_head(&lio->glist[i]); - kfree(g); - } while (g); - - if (lio->glists_virt_base && lio->glists_virt_base[i] && - lio->glists_dma_base && lio->glists_dma_base[i]) { - lio_dma_free(lio->oct_dev, - lio->glist_entry_size * lio->tx_qsize, - lio->glists_virt_base[i], - lio->glists_dma_base[i]); - } - } - - kfree(lio->glists_virt_base); - lio->glists_virt_base = NULL; - - kfree(lio->glists_dma_base); - lio->glists_dma_base = NULL; - - kfree(lio->glist); - lio->glist = NULL; -} - -/** - * \brief Setup gather lists - * @param lio per-network private data - */ -static int setup_glists(struct lio *lio, int num_iqs) -{ - struct octnic_gather *g; - int i, j; - - lio->glist_lock = - kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL); - if (!lio->glist_lock) - return -ENOMEM; - - lio->glist = - kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL); - if (!lio->glist) { - kfree(lio->glist_lock); - lio->glist_lock = NULL; - return -ENOMEM; - } - - lio->glist_entry_size = - ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); - - /* allocate memory to store virtual and dma base address of - * per glist consistent memory - */ - lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), - GFP_KERNEL); - lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), - GFP_KERNEL); - - if (!lio->glists_virt_base || !lio->glists_dma_base) { - delete_glists(lio); - return -ENOMEM; - } - - for (i = 0; i < num_iqs; i++) { - spin_lock_init(&lio->glist_lock[i]); - - INIT_LIST_HEAD(&lio->glist[i]); - - lio->glists_virt_base[i] = - lio_dma_alloc(lio->oct_dev, - lio->glist_entry_size * lio->tx_qsize, - &lio->glists_dma_base[i]); - - if (!lio->glists_virt_base[i]) { - delete_glists(lio); - return -ENOMEM; - } - - for (j = 0; j < lio->tx_qsize; j++) { - g = kzalloc(sizeof(*g), GFP_KERNEL); - if (!g) - break; - - g->sg = lio->glists_virt_base[i] + - (j * lio->glist_entry_size); - - g->sg_dma_ptr = lio->glists_dma_base[i] + - (j * lio->glist_entry_size); - - list_add_tail(&g->list, &lio->glist[i]); - } - - if (j != lio->tx_qsize) { - delete_glists(lio); - return -ENOMEM; - } - } - - return 0; -} - -/** * \brief Print link information * @param netdev network device */ @@ -856,7 +700,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) cleanup_link_status_change_wq(netdev); - delete_glists(lio); + lio_delete_glists(lio); free_netdev(netdev); @@ -1005,7 +849,7 @@ static void free_netsgbuf(void *buf) i++; } - iq = skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); @@ -1049,7 +893,7 @@ static void free_netsgbuf_with_resp(void *buf) i++; } - iq = skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); @@ -1059,38 +903,6 @@ static void free_netsgbuf_with_resp(void *buf) } /** - * \brief Callback for getting interface configuration - * 
@param status status of request - * @param buf pointer to resp structure - */ -static void if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_context *ctx; - struct liquidio_if_cfg_resp *resp; - - resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (resp->status) - dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", - CVM_CAST64(resp->status)); - WRITE_ONCE(ctx->cond, 1); - - snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", - resp->cfg_info.liquidio_firmware_version); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - -/** * \brief Net device open for LiquidIO * @param netdev network device */ @@ -1595,7 +1407,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) lio = GET_LIO(netdev); oct = lio->oct_dev; - q_idx = skb_iq(lio, skb); + q_idx = skb_iq(lio->oct_dev, skb); tag = q_idx; iq_no = lio->linfo.txpciq[q_idx].s.q_no; @@ -1676,8 +1488,8 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) int i, frags; spin_lock(&lio->glist_lock[q_idx]); - g = (struct octnic_gather *)list_delete_head( - &lio->glist[q_idx]); + g = (struct octnic_gather *) + lio_list_delete_head(&lio->glist[q_idx]); spin_unlock(&lio->glist_lock[q_idx]); if (!g) { @@ -2171,7 +1983,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = if_cfg_callback; + sc->callback = lio_if_cfg_callback; sc->callback_arg = sc; sc->wait_time = 5000; @@ -2336,7 +2148,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); - if (setup_glists(lio, num_iqueues)) { + if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); goto setup_nic_dev_fail; @@ -2527,7 +2339,7 @@ static int octeon_device_init(struct octeon_device *oct) } atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); - if (octeon_allocate_ioq_vector(oct)) { + if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) { dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); return 1; } diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 2166744b6812..026570499dcf 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -84,6 +84,7 @@ enum octeon_tag_type { #define OPCODE_NIC_IF_CFG 0x09 #define OPCODE_NIC_VF_DRV_NOTICE 0x0A #define OPCODE_NIC_INTRMOD_PARAMS 0x0B +#define OPCODE_NIC_QCOUNT_UPDATE 0x12 #define OPCODE_NIC_SET_TRUSTED_VF 0x13 #define OPCODE_NIC_SYNC_OCTEON_TIME 0x14 #define VF_DRV_LOADED 1 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index f38abf626412..f878a552fef3 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -824,23 +824,18 @@ int octeon_deregister_device(struct octeon_device *oct) } int -octeon_allocate_ioq_vector(struct octeon_device *oct) 
+octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs) { - int i, num_ioqs = 0; struct octeon_ioq_vector *ioq_vector; int cpu_num; int size; - - if (OCTEON_CN23XX_PF(oct)) - num_ioqs = oct->sriov_info.num_pf_rings; - else if (OCTEON_CN23XX_VF(oct)) - num_ioqs = oct->sriov_info.rings_per_vf; + int i; size = sizeof(struct octeon_ioq_vector) * num_ioqs; oct->ioq_vector = vzalloc(size); if (!oct->ioq_vector) - return 1; + return -1; for (i = 0; i < num_ioqs; i++) { ioq_vector = &oct->ioq_vector[i]; ioq_vector->oct_dev = oct; @@ -856,6 +851,7 @@ octeon_allocate_ioq_vector(struct octeon_device *oct) else ioq_vector->ioq_num = i; } + return 0; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 91937cc5c1d7..9430c0a0bb8c 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -867,7 +867,7 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type); struct octeon_config *octeon_get_conf(struct octeon_device *oct); void octeon_free_ioq_vector(struct octeon_device *oct); -int octeon_allocate_ioq_vector(struct octeon_device *oct); +int octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs); void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq); /* LiquidIO driver pivate flags */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index d090edd1a4a5..8571f11e3c8f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -47,6 +47,29 @@ struct liquidio_if_cfg_resp { u64 status; }; +#define LIO_IFCFG_WAIT_TIME 3000 /* In milli seconds */ + +/* Structure of a node in list of gather components maintained by + * NIC driver for each network device. + */ +struct octnic_gather { + /* List manipulation. Next and prev pointers. */ + struct list_head list; + + /* Size of the gather component at sg in bytes. */ + int sg_size; + + /* Number of bytes that sg was adjusted to make it 8B-aligned. */ + int adjust; + + /* Gather component that can accommodate max sized fragment list + * received from the IP layer. 
+ */ + struct octeon_sg_entry *sg; + + dma_addr_t sg_dma_ptr; +}; + struct oct_nic_stats_resp { u64 rh; struct oct_link_stats stats; @@ -199,6 +222,14 @@ int lio_wait_for_clean_oq(struct octeon_device *oct); */ void liquidio_set_ethtool_ops(struct net_device *netdev); +void lio_if_cfg_callback(struct octeon_device *oct, + u32 status __attribute__((unused)), + void *buf); + +void lio_delete_glists(struct lio *lio); + +int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs); + /** * \brief Net device change_mtu * @param netdev network device @@ -517,7 +548,7 @@ static inline void stop_txqs(struct net_device *netdev) { int i; - for (i = 0; i < netdev->num_tx_queues; i++) + for (i = 0; i < netdev->real_num_tx_queues; i++) netif_stop_subqueue(netdev, i); } @@ -530,7 +561,7 @@ static inline void wake_txqs(struct net_device *netdev) struct lio *lio = GET_LIO(netdev); int i, qno; - for (i = 0; i < netdev->num_tx_queues; i++) { + for (i = 0; i < netdev->real_num_tx_queues; i++) { qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no; if (__netif_subqueue_stopped(netdev, i)) { @@ -551,14 +582,33 @@ static inline void start_txqs(struct net_device *netdev) int i; if (lio->linfo.link.s.link_up) { - for (i = 0; i < netdev->num_tx_queues; i++) + for (i = 0; i < netdev->real_num_tx_queues; i++) netif_start_subqueue(netdev, i); } } -static inline int skb_iq(struct lio *lio, struct sk_buff *skb) +static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb) +{ + return skb->queue_mapping % oct->num_iqs; +} + +/** + * Remove the node at the head of the list. The list would be empty at + * the end of this call if there are no more nodes in the list. + */ +static inline struct list_head *lio_list_delete_head(struct list_head *root) { - return skb->queue_mapping % lio->linfo.num_txpciq; + struct list_head *node; + + if (root->prev == root && root->next == root) + node = NULL; + else + node = root->next; + + if (node) + list_del(node); + + return node; } #endif |