author		Brett Creeley <brett.creeley@intel.com>	2019-02-28 15:25:47 -0800
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2019-04-18 08:38:48 -0700
commit		711987bbad18e6d5fc5d8ff07c8991700e2ed1b6 (patch)
tree		baabd204980889bce5a760d5f65a29234364e80d /drivers/net/ethernet/intel/ice
parent		9c010de7cf0a76f22fb38800816d34cf95ff9002 (diff)
ice: Calculate ITR increment based on direct calculation
Currently, when calculating how much to increment ITR by inside of ice_update_itr(), we do some estimations and intermediate calculations. Instead of estimating, just do the calculation directly. This gives a more accurate value and makes the code easier for the next person to understand and update.

Also, remove the division of the ITR value by 2 when latency driven, because ITR values are already so low at 100Gbps speed. This should help reach the desired ITR value faster.

Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_txrx.c | 135
1 file changed, 63 insertions(+), 72 deletions(-)
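For reference, the per-speed constants in the new ice_adjust_itr_by_size_and_speed() fall straight out of the first factor of the formula documented in the patch: wmem_default * bits_per_byte * usecs_per_sec / rate, rounded to the nearest integer. A minimal userspace sketch (not driver code: DIV_ROUND_UP is redefined locally, and the 1500-byte average packet size is only an example) that reproduces the constants and the resulting increments:

#include <math.h>
#include <stdio.h>

/* Local stand-in for the kernel's DIV_ROUND_UP() macro */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Values from the kernel-doc comment in the patch */
	const double wmem_default = 212992, bits_per_byte = 8,
		     usecs_per_sec = 1e6;
	const double rates_gbps[] = { 100, 50, 40, 25, 20, 10 };
	const unsigned int avg_pkt_size = 1500;	/* example average size */
	size_t i;

	for (i = 0; i < sizeof(rates_gbps) / sizeof(rates_gbps[0]); i++) {
		/* First factor of the formula, rounded to the nearest
		 * integer: reproduces 17, 34, 43, 68, 85 and 170.
		 */
		unsigned int mult = (unsigned int)round(wmem_default *
				bits_per_byte * usecs_per_sec /
				(rates_gbps[i] * 1e9));

		printf("%3.0fG: mult %3u, increment for %uB packets: %u\n",
		       rates_gbps[i], mult, avg_pkt_size,
		       DIV_ROUND_UP(mult * (avg_pkt_size + 24),
				    avg_pkt_size + 640));
	}
	return 0;
}

At 10Gbps with 1500-byte packets the increment works out to 122; at 100Gbps it is only 13, which lines up with the commit message's point that ITR values are already so low at 100Gbps that the old halving in latency mode was no longer useful.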
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index a1dcde49eb99..79043fec0187 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1097,19 +1097,69 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
	return failure ? budget : (int)total_rx_pkts;
}
-static unsigned int ice_itr_divisor(struct ice_port_info *pi)
+/**
+ * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
+ * @port_info: port_info structure containing the current link speed
+ * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
+ * @itr: itr value to update
+ *
+ * Calculate how big of an increment should be applied to the ITR value passed
+ * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
+ * link speed.
+ *
+ * The following is a calculation derived from:
+ * wmem_default / (size + overhead) = desired_pkts_per_int
+ * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
+ * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
+ *
+ * Assuming wmem_default is 212992 and overhead is 640 bytes per
+ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+ * formula down to:
+ *
+ *       wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
+ * ITR = -------------------------------------------- * --------------
+ *                          rate                        pkt_size + 640
+ */
+static unsigned int
+ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
+				 unsigned int avg_pkt_size,
+				 unsigned int itr)
{
-	switch (pi->phy.link_info.link_speed) {
+	switch (port_info->phy.link_info.link_speed) {
+	case ICE_AQ_LINK_SPEED_100GB:
+		itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
+				    avg_pkt_size + 640);
+		break;
+	case ICE_AQ_LINK_SPEED_50GB:
+		itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
+				    avg_pkt_size + 640);
+		break;
	case ICE_AQ_LINK_SPEED_40GB:
-		return ICE_ITR_ADAPTIVE_MIN_INC * 1024;
+		itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
+				    avg_pkt_size + 640);
+		break;
	case ICE_AQ_LINK_SPEED_25GB:
+		itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
+				    avg_pkt_size + 640);
+		break;
	case ICE_AQ_LINK_SPEED_20GB:
-		return ICE_ITR_ADAPTIVE_MIN_INC * 512;
-	case ICE_AQ_LINK_SPEED_100MB:
-		return ICE_ITR_ADAPTIVE_MIN_INC * 32;
+		itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
+				    avg_pkt_size + 640);
+		break;
+	case ICE_AQ_LINK_SPEED_10GB:
+		/* fall through */
	default:
-		return ICE_ITR_ADAPTIVE_MIN_INC * 256;
+		itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
+				    avg_pkt_size + 640);
+		break;
	}
+
+	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
+		itr &= ICE_ITR_ADAPTIVE_LATENCY;
+		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
+	}
+
+	return itr;
}
/**
@@ -1128,8 +1178,8 @@ static unsigned int ice_itr_divisor(struct ice_port_info *pi)
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
-	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;
+	unsigned int packets, bytes, itr;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
@@ -1174,7 +1224,7 @@ ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
-			goto adjust_by_size;
+			goto adjust_by_size_and_speed;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
@@ -1242,70 +1292,11 @@ ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;
-adjust_by_size:
-	/* If packet counts are 256 or greater we can assume we have a gross
-	 * overestimation of what the rate should be. Instead of trying to fine
-	 * tune it just use the formula below to try and dial in an exact value
-	 * gives the current packet size of the frame.
-	 */
-	avg_wire_size = bytes / packets;
+adjust_by_size_and_speed:
-	/* The following is a crude approximation of:
-	 *  wmem_default / (size + overhead) = desired_pkts_per_int
-	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
-	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
-	 *
-	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
-	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
-	 * formula down to
-	 *
-	 * (170 * (size + 24)) / (size + 640) = ITR
-	 *
-	 * We first do some math on the packet size and then finally bitshift
-	 * by 8 after rounding up. We also have to account for PCIe link speed
-	 * difference as ITR scales based on this.
-	 */
-	if (avg_wire_size <= 60) {
-		/* Start at 250k ints/sec */
-		avg_wire_size = 4096;
-	} else if (avg_wire_size <= 380) {
-		/* 250K ints/sec to 60K ints/sec */
-		avg_wire_size *= 40;
-		avg_wire_size += 1696;
-	} else if (avg_wire_size <= 1084) {
-		/* 60K ints/sec to 36K ints/sec */
-		avg_wire_size *= 15;
-		avg_wire_size += 11452;
-	} else if (avg_wire_size <= 1980) {
-		/* 36K ints/sec to 30K ints/sec */
-		avg_wire_size *= 5;
-		avg_wire_size += 22420;
-	} else {
-		/* plateau at a limit of 30K ints/sec */
-		avg_wire_size = 32256;
-	}
-
-	/* If we are in low latency mode halve our delay which doubles the
-	 * rate to somewhere between 100K to 16K ints/sec
-	 */
-	if (itr & ICE_ITR_ADAPTIVE_LATENCY)
-		avg_wire_size >>= 1;
-
-	/* Resultant value is 256 times larger than it needs to be. This
-	 * gives us room to adjust the value as needed to either increase
-	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
-	 *
-	 * Use addition as we have already recorded the new latency flag
-	 * for the ITR value.
-	 */
-	itr += DIV_ROUND_UP(avg_wire_size,
-			    ice_itr_divisor(q_vector->vsi->port_info)) *
-	       ICE_ITR_ADAPTIVE_MIN_INC;
-
-	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
-		itr &= ICE_ITR_ADAPTIVE_LATENCY;
-		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
-	}
+	/* based on checks above packets cannot be 0 so division is safe */
+	itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
+					       bytes / packets, itr);
clear_counts:
	/* write back value */
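The clamp at the end of ice_adjust_itr_by_size_and_speed() keeps the latency flag (carried in a high bit of the ITR value) while pinning the microsecond portion at the adaptive maximum. A minimal standalone sketch of that check, with a hypothetical ice_itr_clamp() helper and placeholder values for the ICE_ITR_* constants (the real definitions live in the driver headers and may differ):

#include <stdio.h>

/* Placeholder values for illustration only; see the ice driver headers
 * for the real definitions.
 */
#define ICE_ITR_ADAPTIVE_LATENCY	0x8000	/* latency-mode flag bit */
#define ICE_ITR_ADAPTIVE_MAX_USECS	0x00FA	/* adaptive ITR ceiling */
#define ICE_ITR_MASK			0x1FFE	/* usec portion of the value */

static unsigned int ice_itr_clamp(unsigned int itr)
{
	/* If the usec portion overflowed the adaptive maximum, keep only
	 * the latency flag and pin the usec portion at the maximum.
	 */
	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}
	return itr;
}

int main(void)
{
	/* 0x8400: latency flag set, usec portion 0x400 (over the max),
	 * so the result keeps the flag and clamps: 0x80fa.
	 */
	printf("0x%04x -> 0x%04x\n", 0x8400u, ice_itr_clamp(0x8400u));
	return 0;
}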