author	Jakub Kicinski <jakub.kicinski@netronome.com>	2018-11-27 22:24:48 -0800
committer	David S. Miller <davem@davemloft.net>	2018-11-30 13:30:44 -0800
commit	95862749674f598e979cb3ef2d18aec2e5d8bd29 (patch)
tree	e1332f741a552545059a1d8865f135ce61f243d9
parent	6015c71e656bb6895b416c31a8b7db457e45cecf (diff)
nfp: copy only the relevant part of the TX descriptor for frags
Chained descriptors for fragments need to duplicate all the descriptor
fields of the skb head, so we copy the descriptor and then modify the
relevant fields.  This is wasteful, because the top half of the descriptor
will get overwritten entirely while the bottom half is not modified at all.
Copy only the bottom half.  This saves us 0.3% of CPU in a GSO test.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/netronome/nfp/nfp_net.h	1
-rw-r--r--	drivers/net/ethernet/netronome/nfp/nfp_net_common.c	12
2 files changed, 8 insertions, 5 deletions
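
For reference, a minimal sketch of the idea behind the patch (simplified from the driver's nfp_net_tx_desc definition; the offload fields in the second half are abbreviated): the descriptor union gains a 64-bit view, so the half of the descriptor that stays identical across chained fragment descriptors can be copied with a single 64-bit store instead of a full 16-byte struct copy.

	/* Simplified sketch, not the full driver definition: a 16 B TX
	 * descriptor with raw 32-bit and (new) 64-bit views.  vals8[1]
	 * covers the offload fields that are identical for every chained
	 * fragment descriptor. */
	struct nfp_net_tx_desc {
		union {
			struct {
				u8 dma_addr_hi;     /* high bits of buffer address */
				__le16 dma_len;     /* length to DMA for this desc */
				u8 offset_eop;      /* data offset + EOP flag */
				__le32 dma_addr_lo; /* low 32 bits of buffer address */
				/* ... offload fields (mss, lso, flags, vlan, data_len) ... */
			} __packed;
			__le32 vals[4];
			__le64 vals8[2];
		};
	};
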
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index bb3dbd74583b..5a9a6178cf26 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -158,6 +158,7 @@ struct nfp_net_tx_desc {
 			__le16 data_len; /* Length of frame + meta data */
 		} __packed;
 		__le32 vals[4];
+		__le64 vals8[2];
 	};
 };
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9aa6265bf4de..b54c6e481229 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -786,11 +786,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 	const struct skb_frag_struct *frag;
-	struct nfp_net_tx_desc *txd, txdg;
 	int f, nr_frags, wr_idx, md_bytes;
 	struct nfp_net_tx_ring *tx_ring;
 	struct nfp_net_r_vector *r_vec;
 	struct nfp_net_tx_buf *txbuf;
+	struct nfp_net_tx_desc *txd;
 	struct netdev_queue *nd_q;
 	struct nfp_net_dp *dp;
 	dma_addr_t dma_addr;
@@ -860,8 +860,10 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 	/* Gather DMA */
 	if (nr_frags > 0) {
+		__le64 second_half;
+
 		/* all descs must match except for in addr, length and eop */
-		txdg = *txd;
+		second_half = txd->vals8[1];
 		for (f = 0; f < nr_frags; f++) {
 			frag = &skb_shinfo(skb)->frags[f];
@@ -878,11 +880,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 			tx_ring->txbufs[wr_idx].fidx = f;
 			txd = &tx_ring->txds[wr_idx];
-			*txd = txdg;
 			txd->dma_len = cpu_to_le16(fsize);
 			nfp_desc_set_dma_addr(txd, dma_addr);
-			txd->offset_eop |=
-				(f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
+			txd->offset_eop = md_bytes |
+				((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
+			txd->vals8[1] = second_half;
 		}
 	u64_stats_update_begin(&r_vec->tx_sync);
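
Putting the two nfp_net_common.c hunks together, the fragment loop after this change looks roughly as follows. This is an illustrative sketch only: DMA mapping, error handling and ring-index bookkeeping are omitted, and the local variable names simply follow the patch context above.

	if (nr_frags > 0) {
		__le64 second_half;

		/* Capture the unchanging half of the head descriptor once;
		 * it holds the offload fields shared by every fragment. */
		second_half = txd->vals8[1];

		for (f = 0; f < nr_frags; f++) {
			frag = &skb_shinfo(skb)->frags[f];
			/* ... map the fragment, advance wr_idx, fill txbuf ... */

			txd = &tx_ring->txds[wr_idx];
			/* Write only the per-fragment fields ... */
			txd->dma_len = cpu_to_le16(fsize);
			nfp_desc_set_dma_addr(txd, dma_addr);
			txd->offset_eop = md_bytes |
				((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
			/* ... and restore the shared half with a single 64-bit
			 * store instead of copying the whole 16 B descriptor. */
			txd->vals8[1] = second_half;
		}
	}
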