Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h             |  84
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h  |   2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c      |   6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h      |   3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c    | 101
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h    |   8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h  |  63
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c        | 719
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h        |  29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h        |  11
10 files changed, 1025 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 1a1776a2a5a1..8103e61e67f9 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -12,29 +12,113 @@
#include <linux/compiler.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
+#include <linux/workqueue.h>
#include <linux/aer.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
+#include <linux/if_bridge.h>
#include "ice_devids.h"
#include "ice_type.h"
+#include "ice_txrx.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
#define ICE_BAR0 0
+#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 64
+#define ICE_MIN_MSIX 2
+#define ICE_MAX_VSI_ALLOC 130
+#define ICE_MAX_TXQS 2048
+#define ICE_MAX_RXQS 2048
+#define ICE_RES_VALID_BIT 0x8000
+#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+struct ice_res_tracker {
+ u16 num_entries;
+ u16 search_hint;
+ u16 list[1];
+};
+
+struct ice_sw {
+ struct ice_pf *pf;
+ u16 sw_id; /* switch ID for this switch */
+ u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
+};
+
enum ice_state {
__ICE_DOWN,
+ __ICE_PFR_REQ, /* set by driver and peers */
+ __ICE_ADMINQ_EVENT_PENDING,
+ __ICE_SERVICE_SCHED,
__ICE_STATE_NBITS /* must be last */
};
+/* struct that defines a VSI, associated with a dev */
+struct ice_vsi {
+ struct net_device *netdev;
+ struct ice_port_info *port_info; /* back pointer to port_info */
+ u16 vsi_num; /* HW (absolute) index of this VSI */
+} ____cacheline_internodealigned_in_smp;
+
+enum ice_pf_flags {
+ ICE_FLAG_MSIX_ENA,
+ ICE_FLAG_FLTR_SYNC,
+ ICE_FLAG_RSS_ENA,
+ ICE_PF_FLAGS_NBITS /* must be last */
+};
+
struct ice_pf {
struct pci_dev *pdev;
+ struct msix_entry *msix_entries;
+ struct ice_res_tracker *irq_tracker;
+ struct ice_vsi **vsi; /* VSIs created by the driver */
+ struct ice_sw *first_sw; /* first switch created by firmware */
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
+ DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
+ DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
+ DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
+ unsigned long serv_tmr_period;
+ unsigned long serv_tmr_prev;
+ struct timer_list serv_tmr;
+ struct work_struct serv_task;
+ struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
+ struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
u32 msg_enable;
+ u32 oicr_idx; /* Other interrupt cause vector index */
+ u32 num_lan_msix; /* Total MSIX vectors for base driver */
+ u32 num_avail_msix; /* remaining MSIX vectors left unclaimed */
+ u16 num_lan_tx; /* num lan tx queues setup */
+ u16 num_lan_rx; /* num lan rx queues setup */
+ u16 q_left_tx; /* remaining num tx queues left unclaimed */
+ u16 q_left_rx; /* remaining num rx queues left unclaimed */
+ u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
+ u16 num_alloc_vsi;
+
struct ice_hw hw;
+ char int_name[ICE_INT_NAME_STR_LEN];
};
+
+/**
+ * ice_irq_dynamic_ena - Enable default interrupt generation settings
+ * @hw: pointer to hw struct
+ */
+static inline void ice_irq_dynamic_ena(struct ice_hw *hw)
+{
+ u32 vector = ((struct ice_pf *)hw->back)->oicr_idx;
+ int itr = ICE_ITR_NONE;
+ u32 val;
+
+ /* clear the PBA here, as this function is meant to clean out all
+ * previous interrupts and enable the interrupt
+ */
+ val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+ (itr << GLINT_DYN_CTL_ITR_INDX_S);
+
+ wr32(hw, GLINT_DYN_CTL(vector), val);
+}
#endif /* _ICE_H_ */
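
As a quick sanity check of the value ice_irq_dynamic_ena() writes, the register composition can be reproduced in isolation. The sketch below is standalone userspace code, not driver code; the shift/mask constants are copied from the ice_hw_autogen.h hunk further down and ICE_ITR_NONE from the new ice_txrx.h:

#include <stdio.h>

#define BIT(n) (1UL << (n))
#define GLINT_DYN_CTL_INTENA_M   BIT(0)  /* enable the interrupt */
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)  /* clear the pending-bit array */
#define GLINT_DYN_CTL_ITR_INDX_S 3
#define ICE_ITR_NONE 3                   /* "don't update any ITR" */

int main(void)
{
	unsigned long val = GLINT_DYN_CTL_INTENA_M |
			    GLINT_DYN_CTL_CLEARPBA_M |
			    (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);

	printf("GLINT_DYN_CTL value: 0x%02lx\n", val); /* prints 0x1b */
	return 0;
}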
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 5061240996b1..f0837e277b2f 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -583,11 +583,13 @@ struct ice_aq_desc {
/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
#define ICE_AQ_LG_BUF 512
+#define ICE_AQ_FLAG_ERR_S 2
#define ICE_AQ_FLAG_LB_S 9
#define ICE_AQ_FLAG_RD_S 10
#define ICE_AQ_FLAG_BUF_S 12
#define ICE_AQ_FLAG_SI_S 13
+#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */
#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */
#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
#define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index ef5bab25c2f5..a4ce8a87fb0d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -282,6 +282,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
return status;
+ /* set these values to minimum allowed */
+ hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
+ hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
+ hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
+ hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;
+
status = ice_init_all_ctrlq(hw);
if (status)
goto err_unroll_cqinit;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index e0a7aa1dd943..dd4473e84ebb 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -17,6 +17,9 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
enum ice_status
+ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_rq_event_info *e, u16 *pending);
+enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index b32c0fc04bc9..5909a4407e38 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -963,3 +963,104 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
desc->opcode = cpu_to_le16(opcode);
desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
+
+/**
+ * ice_clean_rq_elem
+ * @hw: pointer to the hw struct
+ * @cq: pointer to the specific Control queue
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'.
+ */
+enum ice_status
+ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_rq_event_info *e, u16 *pending)
+{
+ u16 ntc = cq->rq.next_to_clean;
+ enum ice_status ret_code = 0;
+ struct ice_aq_desc *desc;
+ struct ice_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ memset(&e->desc, 0, sizeof(e->desc));
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&cq->rq_lock);
+
+ if (!cq->rq.count) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Receive queue not initialized.\n");
+ ret_code = ICE_ERR_AQ_EMPTY;
+ goto clean_rq_elem_err;
+ }
+
+ /* set next_to_use to head */
+ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = ICE_ERR_AQ_NO_WORK;
+ goto clean_rq_elem_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = ICE_CTL_Q_DESC(cq->rq, ntc);
+ desc_idx = ntc;
+
+ flags = le16_to_cpu(desc->flags);
+ if (flags & ICE_AQ_FLAG_ERR) {
+ ret_code = ICE_ERR_AQ_ERROR;
+ cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Receive Queue Event received with error 0x%x\n",
+ cq->rq_last_status);
+ }
+ memcpy(&e->desc, desc, sizeof(e->desc));
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf && e->msg_len)
+ memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
+
+ ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");
+
+ ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
+ cq->rq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc;
+ * FW updates datalen to indicate the event message size.
+ */
+ bi = &cq->rq.r.rq_bi[ntc];
+ memset(desc, 0, sizeof(*desc));
+
+ desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > ICE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->datalen = cpu_to_le16(bi->size);
+ desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, cq->rq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == cq->num_rq_entries)
+ ntc = 0;
+ cq->rq.next_to_clean = ntc;
+ cq->rq.next_to_use = ntu;
+
+clean_rq_elem_out:
+ /* Set pending if needed, unlock and return */
+ if (pending)
+ *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+clean_rq_elem_err:
+ mutex_unlock(&cq->rq_lock);
+
+ return ret_code;
+}
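
The 'pending' calculation at the end of ice_clean_rq_elem() is plain ring-buffer distance arithmetic: how many descriptors sit between next_to_clean and the hardware head, with a wraparound correction. A minimal standalone model (made-up ring values, not driver code):

#include <stdio.h>
#include <stdint.h>

/* same expression as above: entries left between ntc and the HW head */
static uint16_t pending(uint16_t count, uint16_t ntc, uint16_t ntu)
{
	return (uint16_t)((ntc > ntu ? count : 0) + (ntu - ntc));
}

int main(void)
{
	printf("%u\n", pending(64, 10, 13)); /* no wrap: 3 descriptors left */
	printf("%u\n", pending(64, 60, 3));  /* wrapped: 64 - 57 = 7 left */
	return 0;
}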
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index b5306e8a5a0d..ea02b89243e2 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -67,6 +67,14 @@ struct ice_sq_cd {
#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
+/* rq event information */
+struct ice_rq_event_info {
+ struct ice_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
/* Control Queue information */
struct ice_ctl_q_info {
enum ice_ctl_q qtype;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 893d5e967e66..446a8bbef488 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -14,6 +14,12 @@
#define PF_FW_ARQLEN 0x00080280
#define PF_FW_ARQLEN_ARQLEN_S 0
#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, PF_FW_ARQLEN_ARQLEN_S)
+#define PF_FW_ARQLEN_ARQVFE_S 28
+#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S)
+#define PF_FW_ARQLEN_ARQOVFL_S 29
+#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S)
+#define PF_FW_ARQLEN_ARQCRIT_S 30
+#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S)
#define PF_FW_ARQLEN_ARQENABLE_S 31
#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S)
#define PF_FW_ARQT 0x00080480
@@ -25,6 +31,12 @@
#define PF_FW_ATQLEN 0x00080200
#define PF_FW_ATQLEN_ATQLEN_S 0
#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S)
+#define PF_FW_ATQLEN_ATQVFE_S 28
+#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S)
+#define PF_FW_ATQLEN_ATQOVFL_S 29
+#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S)
+#define PF_FW_ATQLEN_ATQCRIT_S 30
+#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S)
#define PF_FW_ATQLEN_ATQENABLE_S 31
#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S)
#define PF_FW_ATQT 0x00080400
@@ -43,6 +55,57 @@
#define PFGEN_CTRL 0x00091000
#define PFGEN_CTRL_PFSWR_S 0
#define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S)
+#define PFHMC_ERRORDATA 0x00520500
+#define PFHMC_ERRORINFO 0x00520400
+#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
+#define GLINT_DYN_CTL_INTENA_S 0
+#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S)
+#define GLINT_DYN_CTL_CLEARPBA_S 1
+#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S)
+#define GLINT_DYN_CTL_ITR_INDX_S 3
+#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
+#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S)
+#define GLINT_DYN_CTL_INTENA_MSK_S 31
+#define GLINT_DYN_CTL_INTENA_MSK_M BIT(GLINT_DYN_CTL_INTENA_MSK_S)
+#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
+#define PFINT_FW_CTL 0x0016C800
+#define PFINT_FW_CTL_MSIX_INDX_S 0
+#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_FW_CTL_MSIX_INDX_S)
+#define PFINT_FW_CTL_ITR_INDX_S 11
+#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, PFINT_FW_CTL_ITR_INDX_S)
+#define PFINT_FW_CTL_CAUSE_ENA_S 30
+#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
+#define PFINT_OICR 0x0016CA00
+#define PFINT_OICR_INTEVENT_S 0
+#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S)
+#define PFINT_OICR_HLP_RDY_S 14
+#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
+#define PFINT_OICR_CPM_RDY_S 15
+#define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S)
+#define PFINT_OICR_ECC_ERR_S 16
+#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S)
+#define PFINT_OICR_MAL_DETECT_S 19
+#define PFINT_OICR_MAL_DETECT_M BIT(PFINT_OICR_MAL_DETECT_S)
+#define PFINT_OICR_GRST_S 20
+#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S)
+#define PFINT_OICR_PCI_EXCEPTION_S 21
+#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S)
+#define PFINT_OICR_GPIO_S 22
+#define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S)
+#define PFINT_OICR_STORM_DETECT_S 24
+#define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S)
+#define PFINT_OICR_HMC_ERR_S 26
+#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S)
+#define PFINT_OICR_PE_CRITERR_S 28
+#define PFINT_OICR_PE_CRITERR_M BIT(PFINT_OICR_PE_CRITERR_S)
+#define PFINT_OICR_CTL 0x0016CA80
+#define PFINT_OICR_CTL_MSIX_INDX_S 0
+#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_OICR_CTL_MSIX_INDX_S)
+#define PFINT_OICR_CTL_ITR_INDX_S 11
+#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, PFINT_OICR_CTL_ITR_INDX_S)
+#define PFINT_OICR_CTL_CAUSE_ENA_S 30
+#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S)
+#define PFINT_OICR_ENA 0x0016C900
#define GLLAN_RCTL_0 0x002941F8
#define GLNVM_FLA 0x000B6108
#define GLNVM_FLA_LOCKED_S 6
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 953be26054ca..d93eaae5dc60 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -26,6 +26,294 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
+static struct workqueue_struct *ice_wq;
+
+/**
+ * ice_search_res - Search the tracker for a block of resources
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ * Returns the base item index of the block, or -ENOMEM for error
+ */
+static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int start = res->search_hint;
+ int end = start;
+
+ id |= ICE_RES_VALID_BIT;
+
+ do {
+ /* skip already allocated entries */
+ if (res->list[end++] & ICE_RES_VALID_BIT) {
+ start = end;
+ if ((start + needed) > res->num_entries)
+ break;
+ }
+
+ if (end == (start + needed)) {
+ int i = start;
+
+ /* there was enough, so assign it to the requestor */
+ while (i != end)
+ res->list[i++] = id;
+
+ if (end == res->num_entries)
+ end = 0;
+
+ res->search_hint = end;
+ return start;
+ }
+ } while (1);
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_get_res - get a block of resources
+ * @pf: board private structure
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ * The search_hint trick and lack of advanced fit-finding only work
+ * because the requests are highly likely to be all the same size.
+ * Linear search time and any fragmentation should be minimal.
+ */
+static int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int ret;
+
+ if (!res || !pf)
+ return -EINVAL;
+
+ if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "param err: needed=%d, num_entries = %d id=0x%04x\n",
+ needed, res->num_entries, id);
+ return -EINVAL;
+ }
+
+ /* search based on search_hint */
+ ret = ice_search_res(res, needed, id);
+
+ if (ret < 0) {
+ /* previous search failed. Reset search hint and try again */
+ res->search_hint = 0;
+ ret = ice_search_res(res, needed, id);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_free_res - free a block of resources
+ * @res: pointer to the resource
+ * @index: starting index previously returned by ice_get_res
+ * @id: identifier to track owner
+ * Returns number of resources freed
+ */
+static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
+{
+ int count = 0;
+ int i;
+
+ if (!res || index >= res->num_entries)
+ return -EINVAL;
+
+ id |= ICE_RES_VALID_BIT;
+ for (i = index; i < res->num_entries && res->list[i] == id; i++) {
+ res->list[i] = 0;
+ count++;
+ }
+
+ return count;
+}
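
Taken together, ice_search_res()/ice_get_res() and ice_free_res() implement a flat first-fit allocator: a claimed slot stores the owner id with ICE_RES_VALID_BIT set, a free slot stores 0, and freeing walks forward while the owner matches. A compact userspace model of that encoding (hypothetical id, not driver code):

#include <stdio.h>
#include <stdint.h>

#define ICE_RES_VALID_BIT 0x8000
#define NUM_ENTRIES 8

int main(void)
{
	uint16_t list[NUM_ENTRIES] = { 0 };
	uint16_t id = 0x0005 | ICE_RES_VALID_BIT; /* hypothetical owner id */
	int i, freed = 0;

	/* claim a block of three starting at index 2, as ice_search_res()
	 * would for needed = 3 with a fresh search_hint */
	for (i = 2; i < 5; i++)
		list[i] = id;

	/* free it again, ice_free_res() style: walk while the owner matches */
	for (i = 2; i < NUM_ENTRIES && list[i] == id; i++) {
		list[i] = 0;
		freed++;
	}
	printf("freed %d entries\n", freed); /* prints "freed 3 entries" */
	return 0;
}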
+
+/**
+ * __ice_clean_ctrlq - helper function to clean controlq rings
+ * @pf: ptr to struct ice_pf
+ * @q_type: specific Control queue type
+ */
+static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
+{
+ struct ice_rq_event_info event;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_ctl_q_info *cq;
+ u16 pending, i = 0;
+ const char *qtype;
+ u32 oldval, val;
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ cq = &hw->adminq;
+ qtype = "Admin";
+ break;
+ default:
+ dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
+ q_type);
+ return 0;
+ }
+
+ /* check for error indications - the PF_xx_AxQLEN register layout
+ * for FW/MBX/SB is identical, so just use the PF_FW_AxQLEN defines.
+ */
+ val = rd32(hw, cq->rq.len);
+ if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
+ PF_FW_ARQLEN_ARQCRIT_M)) {
+ oldval = val;
+ if (val & PF_FW_ARQLEN_ARQVFE_M)
+ dev_dbg(&pf->pdev->dev,
+ "%s Receive Queue VF Error detected\n", qtype);
+ if (val & PF_FW_ARQLEN_ARQOVFL_M) {
+ dev_dbg(&pf->pdev->dev,
+ "%s Receive Queue Overflow Error detected\n",
+ qtype);
+ }
+ if (val & PF_FW_ARQLEN_ARQCRIT_M)
+ dev_dbg(&pf->pdev->dev,
+ "%s Receive Queue Critical Error detected\n",
+ qtype);
+ val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
+ PF_FW_ARQLEN_ARQCRIT_M);
+ if (oldval != val)
+ wr32(hw, cq->rq.len, val);
+ }
+
+ val = rd32(hw, cq->sq.len);
+ if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
+ PF_FW_ATQLEN_ATQCRIT_M)) {
+ oldval = val;
+ if (val & PF_FW_ATQLEN_ATQVFE_M)
+ dev_dbg(&pf->pdev->dev,
+ "%s Send Queue VF Error detected\n", qtype);
+ if (val & PF_FW_ATQLEN_ATQOVFL_M) {
+ dev_dbg(&pf->pdev->dev,
+ "%s Send Queue Overflow Error detected\n",
+ qtype);
+ }
+ if (val & PF_FW_ATQLEN_ATQCRIT_M)
+ dev_dbg(&pf->pdev->dev,
+ "%s Send Queue Critical Error detected\n",
+ qtype);
+ val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
+ PF_FW_ATQLEN_ATQCRIT_M);
+ if (oldval != val)
+ wr32(hw, cq->sq.len, val);
+ }
+
+ event.buf_len = cq->rq_buf_size;
+ event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
+ GFP_KERNEL);
+ if (!event.msg_buf)
+ return 0;
+
+ do {
+ enum ice_status ret;
+
+ ret = ice_clean_rq_elem(hw, cq, &event, &pending);
+ if (ret == ICE_ERR_AQ_NO_WORK)
+ break;
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "%s Receive Queue event error %d\n", qtype,
+ ret);
+ break;
+ }
+ } while (pending && (i++ < ICE_DFLT_IRQ_WORK));
+
+ devm_kfree(&pf->pdev->dev, event.msg_buf);
+
+ return pending && (i == ICE_DFLT_IRQ_WORK);
+}
+
+/**
+ * ice_clean_adminq_subtask - clean the AdminQ rings
+ * @pf: board private structure
+ */
+static void ice_clean_adminq_subtask(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
+ return;
+
+ if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
+ return;
+
+ clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
+
+ /* re-enable Admin queue interrupt causes */
+ val = rd32(hw, PFINT_FW_CTL);
+ wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_service_task_schedule - schedule the service task to wake up
+ * @pf: board private structure
+ *
+ * If not already scheduled, this puts the task into the work queue.
+ */
+static void ice_service_task_schedule(struct ice_pf *pf)
+{
+ if (!test_bit(__ICE_DOWN, pf->state) &&
+ !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state))
+ queue_work(ice_wq, &pf->serv_task);
+}
+
+/**
+ * ice_service_task_complete - finish up the service task
+ * @pf: board private structure
+ */
+static void ice_service_task_complete(struct ice_pf *pf)
+{
+ WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
+
+ /* force memory (pf->state) to sync before next service task */
+ smp_mb__before_atomic();
+ clear_bit(__ICE_SERVICE_SCHED, pf->state);
+}
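
The schedule/complete pair above is the usual single-scheduler guard: test_and_set_bit() lets only the first caller queue the work, and clearing the bit in ice_service_task_complete() re-arms it. A rough userspace analogue using C11 atomics as a stand-in for the kernel bitops (not driver code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag sched = ATOMIC_FLAG_INIT;

/* ice_service_task_schedule() analogue: only the first caller queues */
static void schedule_once(void)
{
	if (!atomic_flag_test_and_set(&sched))
		printf("queued service task\n");
}

int main(void)
{
	schedule_once();           /* queues */
	schedule_once();           /* skipped: already scheduled */
	atomic_flag_clear(&sched); /* ice_service_task_complete() analogue */
	schedule_once();           /* queues again */
	return 0;
}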
+
+/**
+ * ice_service_timer - timer callback to schedule service task
+ * @t: pointer to timer_list
+ */
+static void ice_service_timer(struct timer_list *t)
+{
+ struct ice_pf *pf = from_timer(pf, t, serv_tmr);
+
+ mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
+ ice_service_task_schedule(pf);
+}
+
+/**
+ * ice_service_task - manage and run subtasks
+ * @work: pointer to work_struct contained by the PF struct
+ */
+static void ice_service_task(struct work_struct *work)
+{
+ struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
+ unsigned long start_time = jiffies;
+
+ /* subtasks */
+ ice_clean_adminq_subtask(pf);
+
+ /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
+ ice_service_task_complete(pf);
+
+ /* If the tasks have taken longer than one service timer period
+ * or there is more work to be done, reset the service timer to
+ * schedule the service task now.
+ */
+ if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
+ test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
+ mod_timer(&pf->serv_tmr, jiffies);
+}
+
/**
* ice_set_ctrlq_len - helper function to set controlq length
* @hw: pointer to the hw instance
@@ -39,6 +327,361 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
}
/**
+ * ice_ena_misc_vector - enable the non-queue interrupts
+ * @pf: board private structure
+ */
+static void ice_ena_misc_vector(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ /* clear things first */
+ wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
+ rd32(hw, PFINT_OICR); /* read to clear */
+
+ val = (PFINT_OICR_HLP_RDY_M |
+ PFINT_OICR_CPM_RDY_M |
+ PFINT_OICR_ECC_ERR_M |
+ PFINT_OICR_MAL_DETECT_M |
+ PFINT_OICR_GRST_M |
+ PFINT_OICR_PCI_EXCEPTION_M |
+ PFINT_OICR_GPIO_M |
+ PFINT_OICR_STORM_DETECT_M |
+ PFINT_OICR_HMC_ERR_M);
+
+ wr32(hw, PFINT_OICR_ENA, val);
+
+ /* SW_ITR_IDX = 0, but don't change INTENA */
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+ GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
+}
+
+/**
+ * ice_misc_intr - misc interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
+{
+ struct ice_pf *pf = (struct ice_pf *)data;
+ struct ice_hw *hw = &pf->hw;
+ irqreturn_t ret = IRQ_NONE;
+ u32 oicr, ena_mask;
+
+ set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
+
+ oicr = rd32(hw, PFINT_OICR);
+ ena_mask = rd32(hw, PFINT_OICR_ENA);
+
+ if (!(oicr & PFINT_OICR_INTEVENT_M))
+ goto ena_intr;
+
+ if (oicr & PFINT_OICR_HMC_ERR_M) {
+ ena_mask &= ~PFINT_OICR_HMC_ERR_M;
+ dev_dbg(&pf->pdev->dev,
+ "HMC Error interrupt - info 0x%x, data 0x%x\n",
+ rd32(hw, PFHMC_ERRORINFO),
+ rd32(hw, PFHMC_ERRORDATA));
+ }
+
+ /* Report and mask off any remaining unexpected interrupts */
+ oicr &= ena_mask;
+ if (oicr) {
+ dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
+ oicr);
+ /* If a critical error is pending there is no choice but to
+ * reset the device.
+ */
+ if (oicr & (PFINT_OICR_PE_CRITERR_M |
+ PFINT_OICR_PCI_EXCEPTION_M |
+ PFINT_OICR_ECC_ERR_M))
+ set_bit(__ICE_PFR_REQ, pf->state);
+
+ ena_mask &= ~oicr;
+ }
+ ret = IRQ_HANDLED;
+
+ena_intr:
+ /* re-enable interrupt causes that are not handled during this pass */
+ wr32(hw, PFINT_OICR_ENA, ena_mask);
+ if (!test_bit(__ICE_DOWN, pf->state)) {
+ ice_service_task_schedule(pf);
+ ice_irq_dynamic_ena(hw);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_free_irq_msix_misc - Unroll misc vector setup
+ * @pf: board private structure
+ */
+static void ice_free_irq_msix_misc(struct ice_pf *pf)
+{
+ /* disable OICR interrupt */
+ wr32(&pf->hw, PFINT_OICR_ENA, 0);
+ ice_flush(&pf->hw);
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
+ synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
+ devm_free_irq(&pf->pdev->dev,
+ pf->msix_entries[pf->oicr_idx].vector, pf);
+ }
+
+ ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
+}
+
+/**
+ * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0, which is used to manage the
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
+ * when in MSI or Legacy interrupt mode.
+ */
+static int ice_req_irq_msix_misc(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int oicr_idx, err = 0;
+ u8 itr_gran;
+ u32 val;
+
+ if (!pf->int_name[0])
+ snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
+ dev_driver_string(&pf->pdev->dev),
+ dev_name(&pf->pdev->dev));
+
+ /* reserve one vector in irq_tracker for misc interrupts */
+ oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ if (oicr_idx < 0)
+ return oicr_idx;
+
+ pf->oicr_idx = oicr_idx;
+
+ err = devm_request_irq(&pf->pdev->dev,
+ pf->msix_entries[pf->oicr_idx].vector,
+ ice_misc_intr, 0, pf->int_name, pf);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "devm_request_irq for %s failed: %d\n",
+ pf->int_name, err);
+ ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
+ return err;
+ }
+
+ ice_ena_misc_vector(pf);
+
+ val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+ (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
+ PFINT_OICR_CTL_CAUSE_ENA_M;
+ wr32(hw, PFINT_OICR_CTL, val);
+
+ /* This enables Admin queue Interrupt causes */
+ val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+ (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
+ PFINT_FW_CTL_CAUSE_ENA_M;
+ wr32(hw, PFINT_FW_CTL, val);
+
+ itr_gran = hw->itr_gran_200;
+
+ wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
+ ITR_TO_REG(ICE_ITR_8K, itr_gran));
+
+ ice_flush(hw);
+ ice_irq_dynamic_ena(hw);
+
+ return 0;
+}
+
+/**
+ * ice_determine_q_usage - Calculate queue distribution
+ * @pf: board private structure
+ *
+ * Record how many Tx/Rx queues the PF will use and how many are left over.
+ */
+static void ice_determine_q_usage(struct ice_pf *pf)
+{
+ u16 q_left_tx, q_left_rx;
+
+ q_left_tx = pf->hw.func_caps.common_cap.num_txq;
+ q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
+
+ /* initial support for only 1 tx and 1 rx queue */
+ pf->num_lan_tx = 1;
+ pf->num_lan_rx = 1;
+
+ pf->q_left_tx = q_left_tx - pf->num_lan_tx;
+ pf->q_left_rx = q_left_rx - pf->num_lan_rx;
+}
+
+/**
+ * ice_deinit_pf - Unrolls initializations done by ice_init_pf
+ * @pf: board private structure to initialize
+ */
+static void ice_deinit_pf(struct ice_pf *pf)
+{
+ if (pf->serv_tmr.function)
+ del_timer_sync(&pf->serv_tmr);
+ if (pf->serv_task.func)
+ cancel_work_sync(&pf->serv_task);
+ mutex_destroy(&pf->sw_mutex);
+ mutex_destroy(&pf->avail_q_mutex);
+}
+
+/**
+ * ice_init_pf - Initialize general software structures (struct ice_pf)
+ * @pf: board private structure to initialize
+ */
+static void ice_init_pf(struct ice_pf *pf)
+{
+ bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
+ set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+
+ mutex_init(&pf->sw_mutex);
+ mutex_init(&pf->avail_q_mutex);
+
+ /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
+ mutex_lock(&pf->avail_q_mutex);
+ bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
+ bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
+ mutex_unlock(&pf->avail_q_mutex);
+
+ /* setup service timer and periodic service task */
+ timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+ pf->serv_tmr_period = HZ;
+ INIT_WORK(&pf->serv_task, ice_service_task);
+ clear_bit(__ICE_SERVICE_SCHED, pf->state);
+}
+
+/**
+ * ice_ena_msix_range - Request a range of MSIX vectors from the OS
+ * @pf: board private structure
+ *
+ * Compute the number of MSIX vectors required (v_budget) and request them
+ * from the OS. Return the number of vectors reserved, or negative on failure
+ */
+static int ice_ena_msix_range(struct ice_pf *pf)
+{
+ int v_left, v_actual, v_budget = 0;
+ int needed, err, i;
+
+ v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
+
+ /* reserve one vector for miscellaneous handler */
+ needed = 1;
+ v_budget += needed;
+ v_left -= needed;
+
+ /* reserve vectors for LAN traffic */
+ pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
+ v_budget += pf->num_lan_msix;
+
+ pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+
+ if (!pf->msix_entries) {
+ err = -ENOMEM;
+ goto exit_err;
+ }
+
+ for (i = 0; i < v_budget; i++)
+ pf->msix_entries[i].entry = i;
+
+ /* actually reserve the vectors */
+ v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
+ ICE_MIN_MSIX, v_budget);
+
+ if (v_actual < 0) {
+ dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
+ err = v_actual;
+ goto msix_err;
+ }
+
+ if (v_actual < v_budget) {
+ dev_warn(&pf->pdev->dev,
+ "not enough vectors. requested = %d, obtained = %d\n",
+ v_budget, v_actual);
+ if (v_actual >= (pf->num_lan_msix + 1)) {
+ pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
+ } else if (v_actual >= 2) {
+ pf->num_lan_msix = 1;
+ pf->num_avail_msix = v_actual - 2;
+ } else {
+ pci_disable_msix(pf->pdev);
+ err = -ERANGE;
+ goto msix_err;
+ }
+ }
+
+ return v_actual;
+
+msix_err:
+ devm_kfree(&pf->pdev->dev, pf->msix_entries);
+ goto exit_err;
+
+exit_err:
+ pf->num_lan_msix = 0;
+ clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+ return err;
+}
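
The fallback arithmetic in ice_ena_msix_range() is easiest to see with concrete numbers. A standalone sketch with made-up values (eight CPUs, the OS granting only four vectors), not driver code:

#include <stdio.h>

int main(void)
{
	int num_lan_msix = 8;            /* e.g. eight online CPUs */
	int v_budget = 1 + num_lan_msix; /* one extra for the misc/OICR vector */
	int v_actual = 4;                /* suppose the OS grants only four */

	if (v_actual < v_budget) {
		if (v_actual >= num_lan_msix + 1) {
			/* enough for LAN + misc; the rest become spares */
			printf("avail=%d\n", v_actual - (num_lan_msix + 1));
		} else if (v_actual >= 2) {
			/* shrink to one LAN vector plus the misc vector */
			num_lan_msix = 1;
			printf("lan=%d avail=%d\n",
			       num_lan_msix, v_actual - 2); /* lan=1 avail=2 */
		} else {
			printf("fail: fewer than ICE_MIN_MSIX vectors\n");
		}
	}
	return 0;
}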
+
+/**
+ * ice_dis_msix - Disable MSI-X interrupt setup in OS
+ * @pf: board private structure
+ */
+static void ice_dis_msix(struct ice_pf *pf)
+{
+ pci_disable_msix(pf->pdev);
+ devm_kfree(&pf->pdev->dev, pf->msix_entries);
+ pf->msix_entries = NULL;
+ clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+}
+
+/**
+ * ice_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
+ */
+static int ice_init_interrupt_scheme(struct ice_pf *pf)
+{
+ int vectors = 0;
+ ssize_t size;
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ vectors = ice_ena_msix_range(pf);
+ else
+ return -ENODEV;
+
+ if (vectors < 0)
+ return vectors;
+
+ /* set up vector assignment tracking */
+ size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
+
+ pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ if (!pf->irq_tracker) {
+ ice_dis_msix(pf);
+ return -ENOMEM;
+ }
+
+ pf->irq_tracker->num_entries = vectors;
+
+ return 0;
+}
+
+/**
+ * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
+ * @pf: board private structure
+ */
+static void ice_clear_interrupt_scheme(struct ice_pf *pf)
+{
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ ice_dis_msix(pf);
+
+ devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+ pf->irq_tracker = NULL;
+}
+
+/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in ice_pci_tbl
@@ -113,8 +756,70 @@ static int ice_probe(struct pci_dev *pdev,
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
+ ice_init_pf(pf);
+
+ ice_determine_q_usage(pf);
+
+ pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
+ hw->func_caps.guaranteed_num_vsi);
+ if (!pf->num_alloc_vsi) {
+ err = -EIO;
+ goto err_init_pf_unroll;
+ }
+
+ pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
+ sizeof(struct ice_vsi *), GFP_KERNEL);
+ if (!pf->vsi) {
+ err = -ENOMEM;
+ goto err_init_pf_unroll;
+ }
+
+ err = ice_init_interrupt_scheme(pf);
+ if (err) {
+ dev_err(&pdev->dev,
+ "ice_init_interrupt_scheme failed: %d\n", err);
+ err = -EIO;
+ goto err_init_interrupt_unroll;
+ }
+
+ /* In case of MSIX we are going to setup the misc vector right here
+ * to handle admin queue events etc. In case of legacy and MSI
+ * the misc functionality and queue processing is combined in
+ * the same vector and that gets setup at open.
+ */
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(&pdev->dev,
+ "setup of misc vector failed: %d\n", err);
+ goto err_init_interrupt_unroll;
+ }
+ }
+
+ /* create switch struct for the switch element created by FW on boot */
+ pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
+ GFP_KERNEL);
+ if (!pf->first_sw) {
+ err = -ENOMEM;
+ goto err_msix_misc_unroll;
+ }
+
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+ pf->first_sw->pf = pf;
+
+ /* record the sw_id available for later use */
+ pf->first_sw->sw_id = hw->port_info->sw_id;
+
return 0;
+err_msix_misc_unroll:
+ ice_free_irq_msix_misc(pf);
+err_init_interrupt_unroll:
+ ice_clear_interrupt_scheme(pf);
+ devm_kfree(&pdev->dev, pf->vsi);
+err_init_pf_unroll:
+ ice_deinit_pf(pf);
+ ice_deinit_hw(hw);
err_exit_unroll:
pci_disable_pcie_error_reporting(pdev);
return err;
@@ -133,6 +838,9 @@ static void ice_remove(struct pci_dev *pdev)
set_bit(__ICE_DOWN, pf->state);
+ ice_free_irq_msix_misc(pf);
+ ice_clear_interrupt_scheme(pf);
+ ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
pci_disable_pcie_error_reporting(pdev);
}
@@ -176,9 +884,17 @@ static int __init ice_module_init(void)
pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
pr_info("%s\n", ice_copyright);
+ ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME);
+ if (!ice_wq) {
+ pr_err("Failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
status = pci_register_driver(&ice_driver);
- if (status)
+ if (status) {
pr_err("failed to register pci driver, err %d\n", status);
+ destroy_workqueue(ice_wq);
+ }
return status;
}
@@ -193,6 +909,7 @@ module_init(ice_module_init);
static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
+ destroy_workqueue(ice_wq);
pr_info("module unloaded\n");
}
module_exit(ice_module_exit);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
new file mode 100644
index 000000000000..7ceb69ea745e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_TXRX_H_
+#define _ICE_TXRX_H_
+
+#define ICE_DFLT_IRQ_WORK 256
+
+/* This enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere the manual
+ * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register; instead it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum ice_dyn_idx_t {
+ ICE_IDX_ITR0 = 0,
+ ICE_IDX_ITR1 = 1,
+ ICE_IDX_ITR2 = 2,
+ ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* indices into GLINT_ITR registers */
+#define ICE_RX_ITR ICE_IDX_ITR0
+#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define ICE_ITR_8K 0x003E
+
+/* apply ITR HW granularity translation to program the HW registers */
+#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
+
+#endif /* _ICE_TXRX_H_ */
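
ITR_TO_REG() just strips the ICE_ITR_DYNAMIC flag bit and right-shifts by the granularity. A quick standalone check using the minimum 200G granularity (itr_gran = 1, as set in ice_init_hw() above):

#include <stdio.h>

#define ICE_ITR_DYNAMIC 0x8000
#define ICE_ITR_8K      0x003E
#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))

int main(void)
{
	/* 0x3e >> 1 = 0x1f: the value written to GLINT_ITR for the OICR */
	printf("0x%04x\n", ITR_TO_REG(ICE_ITR_8K, 1)); /* prints 0x001f */
	return 0;
}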
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 6024c80bfa6b..2325be552a84 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -261,6 +261,17 @@ struct ice_hw {
u8 fw_min_ver; /* firmware minor version */
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
+
+ /* minimum allowed value for different speeds */
+#define ICE_ITR_GRAN_MIN_200 1
+#define ICE_ITR_GRAN_MIN_100 1
+#define ICE_ITR_GRAN_MIN_50 2
+#define ICE_ITR_GRAN_MIN_25 4
+ /* ITR granularity in 1 us */
+ u8 itr_gran_200;
+ u8 itr_gran_100;
+ u8 itr_gran_50;
+ u8 itr_gran_25;
};
/* Checksum and Shadow RAM pointers */