author     David S. Miller <davem@davemloft.net>    2014-03-19 23:55:48 -0400
committer  David S. Miller <davem@davemloft.net>    2014-03-19 23:55:48 -0400
commit     20248162f221184790b598a88763a87170b0f08e
tree       81e79217b8bc763008afca67c8f229b9c0c08e1e /drivers/net/ethernet
parent     28bdc499d647124fa5844453d35e6f5d1b3810dc
parent     5b346dc97567270a5c0f02a390a1d1bb65237cea
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:
====================
Intel Wired LAN Driver Updates
This series contains updates to i40e, i40evf, e1000e, ixgbe and ixgbevf.
Mitch adds support for the VF link state ndo, which allows the PF driver
to control the virtual link state of the VF devices. He also adds support
for viewing and modifying RSS hash options and RSS hash look-up table
programming through ethtool for i40evf, and fixes a complaint about the
use of min() where min_t() should be used in i40evf.
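The VF link state ndo is driven from userspace by the standard iproute2
knob, e.g. "ip link set <pf-dev> vf 0 state disable". As a rough
orientation, such a handler boils down to the dispatch below; this is a
simplified sketch, not the exact i40e code, and the example_* names and
per-VF lookup are hypothetical.

	/* Simplified sketch of a .ndo_set_vf_link_state handler.  The
	 * link_forced/link_up fields mirror the ones added to struct
	 * i40e_vf; example_get_vf() is a hypothetical per-VF lookup.
	 */
	static int example_set_vf_link_state(struct net_device *netdev,
					     int vf_id, int link)
	{
		struct example_vf *vf = example_get_vf(netdev, vf_id);

		if (!vf)
			return -EINVAL;

		switch (link) {
		case IFLA_VF_LINK_STATE_AUTO:	/* track the physical link */
			vf->link_forced = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:	/* force link up for this VF */
			vf->link_forced = true;
			vf->link_up = true;
			break;
		case IFLA_VF_LINK_STATE_DISABLE: /* force link down for this VF */
			vf->link_forced = true;
			vf->link_up = false;
			break;
		default:
			return -EINVAL;
		}

		/* notify the VF of its new (possibly forced) link state */
		return 0;
	}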
Anjali adds support for the ethtool -k option for NTUPLE control in i40e.
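Flipping that feature flag from userspace (e.g. "ethtool -K ethX ntuple on")
reaches the driver through .ndo_set_features, which only has to decide
whether the flow-director sideband state changed enough to require a reset.
A condensed sketch of that decision, following the i40e_set_ntuple() helper
added in this series (the conditional re-enable of ATR is folded into a
plain set here):

	/* Decide whether toggling NETIF_F_NTUPLE requires a PF reset. */
	bool example_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
	{
		bool need_reset = false;

		if (features & NETIF_F_NTUPLE) {
			/* turning sideband filters on: reset if they were off */
			if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
				need_reset = true;
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		} else {
			/* turning them off: flush the SW filter list, reset,
			 * and let ATR run again
			 */
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				need_reset = true;
				i40e_fdir_filter_exit(pf);
			}
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		}

		return need_reset;
	}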
Elizabeth cleans up and refactors i40e_open() to separate out the VSI
code into its own i40e_vsi_open().
Jesse enables the head write-back hardware feature for i40e and i40evf:
rather than marking each completed descriptor with a DD bit, the hardware
writes a memory location with the position up to which the driver should
clean, avoiding writes back into the descriptor ring. He also reduces
context descriptor usage for i40e/i40evf, since a context descriptor is
not needed for every packet, only for TSO or timesync.
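Concretely, the driver reserves an extra u32 just past the last Tx
descriptor and programs the hardware (head_wb_addr) to write the current
head index there; the clean-up loop then compares its position against that
value instead of testing DD bits. The helper below mirrors the
i40e_get_head() function added in this series:

	/* Retrieve the Tx head from the head write-back location, which
	 * sits immediately after the last descriptor in the ring.
	 */
	static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
	{
		void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

		return le32_to_cpu(*(volatile __le32 *)head);
	}

The clean-up path in i40e_clean_tx_irq() then stops once it has caught up:

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
	...
	/* we have caught up to head, no work left to do */
	if (tx_head == tx_desc)
		break;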
Dan Carpenter fixes a potential array underflow in i40e_vc_process_vf_msg().
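The underflow stems from deriving the local VF index with a signed type: a
vf_id below the function's VF base yields a negative index that can slip
past an upper-bound-only range check, whereas an unsigned index wraps to a
huge value that the same check rejects. A contrived stand-alone
illustration (not driver code):

	#include <stdio.h>

	int main(void)
	{
		int vf_id = 2, vf_base_id = 5, num_alloc_vfs = 8;
		int signed_idx = vf_id - vf_base_id;            /* -3 */
		unsigned int unsigned_idx = vf_id - vf_base_id; /* wraps to a huge value */

		printf("signed passes range check:   %d\n",
		       signed_idx < num_alloc_vfs);                  /* 1 */
		printf("unsigned passes range check: %d\n",
		       unsigned_idx < (unsigned int)num_alloc_vfs);  /* 0 */
		return 0;
	}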
Dave fixes an e1000e hardware unit hang where the check for pending Tx work
on link loss had mistakenly been moved so that it ran only when link was
first detected to be lost. He also fixes poor network performance on
certain e1000e silicon when configured for 100M half-duplex.
Carolyn adds the register defines needed for the time sync functions, along
with the code that uses the new defines.
Jacob adds an ixgbe helper for writing a PCI config word, which first checks
whether the adapter has been removed.
Mark adds the __IXGBEVF_REMOVING bit to indicate that the module is being
removed, because overloading the __IXGBEVF_DOWN bit for that purpose led to
trouble. The ixgbevf_down function can now prevent multiple executions by
doing test_and_set_bit on __IXGBEVF_DOWN.
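The test_and_set_bit pattern is a cheap re-entrancy guard: the first caller
atomically sets the bit and proceeds, while any concurrent or repeated
caller finds it already set and bails out. A minimal sketch of the idiom as
used in the patch (the rest of the function body is elided):

	void ixgbevf_down(struct ixgbevf_adapter *adapter)
	{
		/* signal that we are down to the interrupt handler;
		 * if the bit was already set, another ixgbevf_down()
		 * is (or was) in progress, so do nothing
		 */
		if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
			return;

		/* ... disable Rx queues, NAPI, IRQs and timers as before ... */
	}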
v2:
- dropped Mitch's patch "i40evf: Support RSS option in ethtool" based on
feedback from Ben Hutchings so that Mitch can re-work the solution
v3:
- removed unnecessary parenthesis in patch 1 based on feedback from David
Miller
- changed a macro to get the next queue to a function in patch 2 based on
feedback from David Miller
- added blank lines after variable declaration and code in two functions
in patch 6 based on feedback from David Miller
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 42
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 89
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 49
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 91
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 49
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 29
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 70
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 9
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 18
16 files changed, 408 insertions, 71 deletions
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index eafad410e59a..6bd1832e3f3e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1090,8 +1090,14 @@ static void e1000_print_hw_hang(struct work_struct *work) adapter->tx_hang_recheck = true; return; } - /* Real hang detected */ adapter->tx_hang_recheck = false; + + if (er32(TDH(0)) == er32(TDT(0))) { + e_dbg("false hang detected, ignoring\n"); + return; + } + + /* Real hang detected */ netif_stop_queue(netdev); e1e_rphy(hw, MII_BMSR, &phy_status); @@ -1121,6 +1127,8 @@ static void e1000_print_hw_hang(struct work_struct *work) eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), phy_status, phy_1000t_status, phy_ext_status, pci_status); + e1000e_dump(adapter); + /* Suggest workaround for known h/w issue */ if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) e_err("Try turning off Tx pause (flow control) via ethtool\n"); @@ -2890,7 +2898,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_ring *tx_ring = adapter->tx_ring; u64 tdba; - u32 tdlen, tarc; + u32 tdlen, tctl, tarc; /* Setup the HW Tx Head and Tail descriptor pointers */ tdba = tx_ring->dma; @@ -2927,6 +2935,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) /* erratum work around: set txdctl the same for both queues */ ew32(TXDCTL(1), er32(TXDCTL(0))); + /* Program the Transmit Control Register */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { tarc = er32(TARC(0)); /* set the speed mode bit, we'll clear it if we're not at @@ -2957,6 +2971,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) /* enable Report Status bit */ adapter->txd_cmd |= E1000_TXD_CMD_RS; + ew32(TCTL, tctl); + hw->mac.ops.config_collision_dist(hw); } @@ -4798,6 +4814,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) if (adapter->phy_hang_count > 1) { adapter->phy_hang_count = 0; + e_dbg("PHY appears hung - resetting\n"); schedule_work(&adapter->reset_task); } } @@ -4956,15 +4973,11 @@ static void e1000_watchdog_task(struct work_struct *work) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); - /* The link is lost so the controller stops DMA. - * If there is queued Tx work that cannot be done - * or if on an 8000ES2LAN which requires a Rx packet - * buffer work-around on link down event, reset the - * controller to flush the Tx/Rx packet buffers. - * (Do the reset outside of interrupt context). + /* 8000ES2LAN requires a Rx packet buffer work-around + * on link down event; reset the controller to flush + * the Rx packet buffer. */ - if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || - (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) + if (adapter->flags & FLAG_RX_NEEDS_RESTART) adapter->flags |= FLAG_RESTART_NOW; else pm_schedule_suspend(netdev->dev.parent, @@ -4987,6 +5000,15 @@ link_up: adapter->gotc_old = adapter->stats.gotc; spin_unlock(&adapter->stats64_lock); + /* If the link is lost the controller stops DMA, but + * if there is queued Tx work it cannot be done. So + * reset the controller to flush the Tx packet buffers. 
+ */ + if (!netif_carrier_ok(netdev) && + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) + adapter->flags |= FLAG_RESTART_NOW; + + /* If reset is necessary, do it outside of interrupt context. */ if (adapter->flags & FLAG_RESTART_NOW) { schedule_work(&adapter->reset_task); /* return immediately since reset is imminent */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index bd1b4690a608..33cd8b67535d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -208,7 +208,7 @@ struct i40e_pf { bool fc_autoneg_status; u16 eeprom_version; - u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */ + u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ u16 num_req_vfs; /* num vfs requested for this vf */ @@ -558,6 +558,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, struct i40e_fdir_filter *input, bool add); void i40e_fdir_check_and_reenable(struct i40e_pf *pf); int i40e_get_current_fd_count(struct i40e_pf *pf); +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -596,6 +597,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 3daaf205eabc..113354214517 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -38,7 +38,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 0 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 34 +#define DRV_VERSION_BUILD 36 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -2181,6 +2181,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)); tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); + /* FDIR VSI tx ring can still use RS bit and writebacks */ + if (vsi->type != I40E_VSI_FDIR) + tx_ctx.head_wb_ena = 1; + tx_ctx.head_wb_addr = ring->dma + + (ring->count * sizeof(struct i40e_tx_desc)); /* As part of VSI creation/update, FW allocates certain * Tx arbitration queue sets for each TC enabled for @@ -4235,7 +4240,6 @@ static int i40e_open(struct net_device *netdev) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - char int_name[IFNAMSIZ]; int err; /* disallow open during test */ @@ -4244,6 +4248,31 @@ static int i40e_open(struct net_device *netdev) netif_carrier_off(netdev); + err = i40e_vsi_open(vsi); + if (err) + return err; + +#ifdef CONFIG_I40E_VXLAN + vxlan_get_rx_port(netdev); +#endif + + return 0; +} + +/** + * i40e_vsi_open - + * @vsi: the VSI to open + * + * Finish initialization of the VSI. 
+ * + * Returns 0 on success, negative value on failure + **/ +int i40e_vsi_open(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + char int_name[IFNAMSIZ]; + int err; + /* allocate descriptors */ err = i40e_vsi_setup_tx_resources(vsi); if (err) @@ -4256,18 +4285,22 @@ static int i40e_open(struct net_device *netdev) if (err) goto err_setup_rx; + if (!vsi->netdev) { + err = EINVAL; + goto err_setup_rx; + } snprintf(int_name, sizeof(int_name) - 1, "%s-%s", - dev_driver_string(&pf->pdev->dev), netdev->name); + dev_driver_string(&pf->pdev->dev), vsi->netdev->name); err = i40e_vsi_request_irq(vsi, int_name); if (err) goto err_setup_rx; /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs); + err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs); if (err) goto err_set_queues; - err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs); + err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs); if (err) goto err_set_queues; @@ -4275,10 +4308,6 @@ static int i40e_open(struct net_device *netdev) if (err) goto err_up_complete; -#ifdef CONFIG_I40E_VXLAN - vxlan_get_rx_port(netdev); -#endif - return 0; err_up_complete: @@ -6409,6 +6438,39 @@ sw_init_done: } /** + * i40e_set_ntuple - set the ntuple feature flag and take action + * @pf: board private structure to initialize + * @features: the feature set that the stack is suggesting + * + * returns a bool to indicate if reset needs to happen + **/ +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) +{ + bool need_reset = false; + + /* Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. + */ + if (features & NETIF_F_NTUPLE) { + /* Enable filters and mark for reset */ + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + need_reset = true; + pf->flags |= I40E_FLAG_FD_SB_ENABLED; + } else { + /* turn off filters, mark for reset and clear SW filter list */ + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + need_reset = true; + i40e_fdir_filter_exit(pf); + } + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + /* if ATR was disabled it can be re-enabled. 
*/ + if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + } + return need_reset; +} + +/** * i40e_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting @@ -6418,12 +6480,19 @@ static int i40e_set_features(struct net_device *netdev, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + bool need_reset; if (features & NETIF_F_HW_VLAN_CTAG_RX) i40e_vlan_stripping_enable(vsi); else i40e_vlan_stripping_disable(vsi); + need_reset = i40e_set_ntuple(pf, features); + + if (need_reset) + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + return 0; } @@ -6547,6 +6616,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, .ndo_get_vf_config = i40e_ndo_get_vf_config, + .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, #ifdef CONFIG_I40E_VXLAN .ndo_add_vxlan_port = i40e_add_vxlan_port, .ndo_del_vxlan_port = i40e_del_vxlan_port, @@ -6594,6 +6664,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM | + NETIF_F_NTUPLE | NETIF_F_RXHASH | 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 88666adb0743..851f6537a96a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -619,6 +619,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) } /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @tx_ring: tx ring to clean * @budget: how many cleans we're allowed @@ -629,6 +643,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) { u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; unsigned int total_packets = 0; unsigned int total_bytes = 0; @@ -637,6 +652,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_desc = I40E_TX_DESC(tx_ring, i); i -= tx_ring->count; + tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); + do { struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; @@ -647,9 +664,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) /* prevent any other reads prior to eop_desc */ read_barrier_depends(); - /* if the descriptor isn't done, no work yet to do */ - if (!(eop_desc->cmd_type_offset_bsz & - cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) + /* we have caught up to head, no work left to do */ + if (tx_head == tx_desc) break; /* clear next_to_watch to prevent false hangs */ @@ -905,6 +921,10 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + /* add u32 for head writeback, align after this takes care of + * guaranteeing this is at least one cache line in size + */ + tx_ring->size += sizeof(u32); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, 
GFP_KERNEL); @@ -1931,7 +1951,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, struct i40e_tx_context_desc *context_desc; int i = tx_ring->next_to_use; - if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) + if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && + !cd_tunneling && !cd_l2tag2) return; /* grab the next descriptor */ @@ -2042,9 +2063,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = &tx_ring->tx_bi[i]; } - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); + /* Place RS bit on last descriptor of any packet that spans across the + * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. + */ +#define WB_STRIDE 0x3 + if (((i & WB_STRIDE) != WB_STRIDE) && + (first <= &tx_ring->tx_bi[i]) && + (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << + I40E_TXD_QW1_CMD_SHIFT); + } else { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TXD_CMD << + I40E_TXD_QW1_CMD_SHIFT); + } netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 7839343b967b..02c11a7f7d29 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -230,6 +230,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, tx_ctx.qlen = info->ring_len; tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); tx_ctx.rdylist_act = 0; + tx_ctx.head_wb_ena = 1; + tx_ctx.head_wb_addr = info->dma_ring_addr + + (info->ring_len * sizeof(struct i40e_tx_desc)); /* clear the context in the HMC */ ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); @@ -1771,7 +1774,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) { struct i40e_hw *hw = &pf->hw; - int local_vf_id = vf_id - hw->func_caps.vf_base_id; + unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id; struct i40e_vf *vf; int ret; @@ -1920,15 +1923,28 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, void i40e_vc_notify_link_state(struct i40e_pf *pf) { struct i40e_virtchnl_pf_event pfe; + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf = pf->vf; + struct i40e_link_status *ls = &pf->hw.phy.link_info; + int i; pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; - pfe.event_data.link_event.link_status = - pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; - pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed; - - i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, - (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); + for (i = 0; i < pf->num_alloc_vfs; i++) { + if (vf->link_forced) { + pfe.event_data.link_event.link_status = vf->link_up; + pfe.event_data.link_event.link_speed = + (vf->link_up ? 
I40E_LINK_SPEED_40GB : 0); + } else { + pfe.event_data.link_event.link_status = + ls->link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event.link_speed = ls->link_speed; + } + i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), + NULL); + vf++; + } } /** @@ -2193,3 +2209,64 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, error_param: return ret; } + +/** + * i40e_ndo_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: vf identifier + * @link: required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_virtchnl_pf_event pfe; + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto error_out; + } + + vf = &pf->vf[vf_id]; + + pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + + switch (link) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + pfe.event_data.link_event.link_status = + pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event.link_speed = + pf->hw.phy.link_info.link_speed; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + pfe.event_data.link_event.link_status = true; + pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + pfe.event_data.link_event.link_status = false; + pfe.event_data.link_event.link_speed = 0; + break; + default: + ret = -EINVAL; + goto error_out; + } + /* Notify the VF of its new link state */ + i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), NULL); + +error_out: + return ret; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index bedf0ba21d74..389c47f396d5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -98,6 +98,8 @@ struct i40e_vf { unsigned long vf_caps; /* vf's adv. 
capabilities */ unsigned long vf_states; /* vf's runtime states */ + bool link_forced; + bool link_up; /* only valid if vf link is forced */ }; void i40e_free_vfs(struct i40e_pf *pf); @@ -116,6 +118,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); +int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); + void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index b1d87c6a5c35..53be5f44d015 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -170,6 +170,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) } /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @tx_ring: tx ring to clean * @budget: how many cleans we're allowed @@ -180,6 +194,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) { u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; unsigned int total_packets = 0; unsigned int total_bytes = 0; @@ -188,6 +203,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_desc = I40E_TX_DESC(tx_ring, i); i -= tx_ring->count; + tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); + do { struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; @@ -198,9 +215,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) /* prevent any other reads prior to eop_desc */ read_barrier_depends(); - /* if the descriptor isn't done, no work yet to do */ - if (!(eop_desc->cmd_type_offset_bsz & - cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) + /* we have caught up to head, no work left to do */ + if (tx_head == tx_desc) break; /* clear next_to_watch to prevent false hangs */ @@ -432,6 +448,10 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + /* add u32 for head writeback, align after this takes care of + * guaranteeing this is at least one cache line in size + */ + tx_ring->size += sizeof(u32); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); @@ -1266,7 +1286,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, struct i40e_tx_context_desc *context_desc; int i = tx_ring->next_to_use; - if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) + if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && + !cd_tunneling && !cd_l2tag2) return; /* grab the next descriptor */ @@ -1377,9 +1398,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = &tx_ring->tx_bi[i]; } - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); + /* Place RS bit on last descriptor of 
any packet that spans across the + * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. + */ +#define WB_STRIDE 0x3 + if (((i & WB_STRIDE) != WB_STRIDE) && + (first <= &tx_ring->tx_bi[i]) && + (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << + I40E_TXD_QW1_CMD_SHIFT); + } else { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TXD_CMD << + I40E_TXD_QW1_CMD_SHIFT); + } netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index d62e27f6e83a..d381bcc4ea9f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -31,7 +31,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710 X710 Virtual Function Network Driver"; -#define DRV_VERSION "0.9.14" +#define DRV_VERSION "0.9.16" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; @@ -1140,8 +1140,8 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) * than CPU's. So let's be conservative and only ask for * (roughly) twice the number of vectors as there are CPU's. */ - v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; - v_budget = min(v_budget, (int)adapter->vf_res->max_vectors); + v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; + v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors); /* A failure in MSI-X entry allocation isn't fatal, but it does * mean we disable MSI-X capabilities of the adapter. @@ -1414,6 +1414,13 @@ restart_watchdog: schedule_work(&adapter->adminq_task); } +static int next_queue(struct i40evf_adapter *adapter, int j) +{ + j += 1; + + return j >= adapter->vsi_res->num_queue_pairs ? 0 : j; +} + /** * i40evf_configure_rss - Prepare for RSS if used * @adapter: board private structure @@ -1444,15 +1451,13 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); /* Populate the LUT with max no. 
of queues in round robin fashion */ - for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) { - if (j == adapter->vsi_res->num_queue_pairs) - j = 0; - /* lut = 4-byte sliding window of 4 lut entries */ - lut = (lut << 8) | (j & - ((0x1 << 8) - 1)); - /* On i = 3, we have 4 entries in lut; write to the register */ - if ((i & 3) == 3) - wr32(hw, I40E_VFQF_HLUT(i >> 2), lut); + j = adapter->vsi_res->num_queue_pairs; + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + lut = next_queue(adapter, j); + lut |= next_queue(adapter, j) << 8; + lut |= next_queue(adapter, j) << 16; + lut |= next_queue(adapter, j) << 24; + wr32(hw, I40E_VFQF_HLUT(i), lut); } i40e_flush(hw); } diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 393c896ac7e7..b05bf925ac72 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -43,7 +43,11 @@ #define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ /* Extended Device Control */ +#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ +#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ + /* Physical Func Reset Done Indication */ #define E1000_CTRL_EXT_PFRSTD 0x00004000 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 @@ -190,7 +194,8 @@ /* enable link status from external LINK_0 and LINK_1 pins */ #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ -#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ #define E1000_CTRL_RST 0x04000000 /* Global reset */ #define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ #define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ @@ -528,8 +533,67 @@ #define E1000_TIMINCA_16NS_SHIFT 24 -#define E1000_TSICR_TXTS 0x00000002 -#define E1000_TSIM_TXTS 0x00000002 +/* Time Sync Interrupt Cause/Mask Register Bits */ + +#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ +#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */ +#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ +#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */ + +#define TSYNC_INTERRUPTS TSINTR_TXTS +#define E1000_TSICR_TXTS TSINTR_TXTS + +/* TSAUXC Configuration Bits */ +#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */ +#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ +#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */ +#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. 
*/ +#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */ +#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */ + +/* SDP Configuration Bits */ +#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */ +#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */ +#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */ +#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */ +#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. 
*/ #define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ #define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index abdd935a9dad..e9c5fdd60f54 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -40,6 +40,7 @@ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ #define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ #define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ #define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ @@ -101,6 +102,14 @@ #define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ #define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ #define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ #define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ #define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ #define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 9c9c141f089a..a894551ae3c0 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -799,7 +799,7 @@ void igb_ptp_init(struct igb_adapter *adapter) /* Initialize the time sync interrupts for devices that support it. */ if (hw->mac.type >= e1000_82580) { - wr32(E1000_TSIM, E1000_TSIM_TXTS); + wr32(E1000_TSIM, TSYNC_INTERRUPTS); wr32(E1000_IMS, E1000_IMS_TS); } @@ -877,7 +877,7 @@ void igb_ptp_reset(struct igb_adapter *adapter) case e1000_i211: /* Enable the timer functions and interrupts. 
*/ wr32(E1000_TSAUXC, 0x0); - wr32(E1000_TSIM, E1000_TSIM_TXTS); + wr32(E1000_TSIM, TSYNC_INTERRUPTS); wr32(E1000_IMS, E1000_IMS_TS); break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index f8ebe583a2ab..7fe22542e404 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -58,7 +58,6 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, **/ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) { - struct ixgbe_adapter *adapter = hw->back; u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); u16 pcie_devctl2; @@ -84,11 +83,8 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) * 16ms to 55ms */ pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); - if (ixgbe_removed(hw->hw_addr)) - return; pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; - pci_write_config_word(adapter->pdev, - IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); + ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); out: /* disable completion timeout resend */ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index d1d67ba54775..afa1cda90c2e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -133,6 +133,7 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); #define IXGBE_FAILED_READ_CFG_WORD 0xffffU u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); +void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); static inline bool ixgbe_removed(void __iomem *addr) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 18cd8ca319ea..c773d6cb6063 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -361,6 +361,15 @@ static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) } #endif /* CONFIG_PCI_IOV */ +void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) +{ + struct ixgbe_adapter *adapter = hw->back; + + if (ixgbe_removed(hw->hw_addr)) + return; + pci_write_config_word(adapter->pdev, reg, value); +} + static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) { BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 54829326bb09..08fb88aba67b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -412,7 +412,8 @@ struct ixgbevf_adapter { enum ixbgevf_state_t { __IXGBEVF_TESTING, __IXGBEVF_RESETTING, - __IXGBEVF_DOWN + __IXGBEVF_DOWN, + __IXGBEVF_REMOVING, }; struct ixgbevf_cb { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 8581079791fe..a2cba53c31be 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -608,7 +608,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) napi_complete(napi); if (adapter->rx_itr_setting & 1) ixgbevf_set_itr(q_vector); - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && + !test_bit(__IXGBEVF_REMOVING, &adapter->state)) ixgbevf_irq_enable_queues(adapter, 1 << q_vector->v_idx); @@ -833,7 +834,8 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data) hw->mac.get_link_status = 1; - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && + !test_bit(__IXGBEVF_REMOVING, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); @@ -1618,6 +1620,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) spin_unlock_bh(&adapter->mbx_lock); + smp_mb__before_clear_bit(); clear_bit(__IXGBEVF_DOWN, &adapter->state); ixgbevf_napi_enable_all(adapter); @@ -1742,7 +1745,8 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) int i; /* signal that we are down to the interrupt handler */ - set_bit(__IXGBEVF_DOWN, &adapter->state); + if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) + return; /* do nothing if already down */ /* disable all enabled rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) @@ -2329,6 +2333,7 @@ static void ixgbevf_reset_task(struct work_struct *work) /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_REMOVING, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) return; @@ -2413,7 +2418,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work) pf_has_reset: /* Reset the timer */ - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && + !test_bit(__IXGBEVF_REMOVING, &adapter->state)) mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + (2 * HZ))); @@ -3563,7 +3569,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); - set_bit(__IXGBEVF_DOWN, &adapter->state); + set_bit(__IXGBEVF_REMOVING, &adapter->state); del_timer_sync(&adapter->watchdog_timer); |