author    | Alexander Duyck <alexander.h.duyck@intel.com> | 2017-01-17 08:36:03 -0800
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com>    | 2017-02-16 04:02:44 -0800
commit    | 1b56cf49f5b0bab9ad4eab18f9b0aee1929afd89
tree      | b5ca5f804f6c90de93efde85b822953850283396 /drivers/net/ethernet/intel/ixgbe/ixgbe.h
parent    | f3213d9321735aa8e252d87796d5db43d4b830ec
ixgbe: Update code to better handle incrementing page count
Batch the page count updates instead of performing them one at a time. The
atomic increments are expensive because on x86 they are locked operations
that can cause stalls; doing the updates in bulk consolidates that stall and
should improve overall receive performance.
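The mechanism is the new pagecnt_bias field added in the diff below: the driver charges the page's reference count once in bulk and then tracks its own share in a plain counter, so the receive hot path avoids a locked increment per packet. The C sketch below only illustrates that idea and is not the driver's actual code: the example_rx_buffer struct and the example_* helpers are hypothetical names, while page_ref_add() and USHRT_MAX are real kernel symbols.

/* Illustrative only -- not ixgbe's implementation. */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>

struct example_rx_buffer {
	struct page *page;
	__u16 pagecnt_bias;	/* page references still "owned" by the driver */
};

/* One locked add at allocation time covers up to ~64K buffer hand-offs. */
static void example_rx_buffer_charge(struct example_rx_buffer *buf)
{
	page_ref_add(buf->page, USHRT_MAX - 1);
	buf->pagecnt_bias = USHRT_MAX;
}

/* Handing part of the page to the stack costs only a plain decrement. */
static void example_rx_buffer_put(struct example_rx_buffer *buf)
{
	buf->pagecnt_bias--;
}

/* On recycle, refill the bias in bulk only when it is nearly spent,
 * amortizing the atomic operation over many received frames.
 */
static void example_rx_buffer_recycle(struct example_rx_buffer *buf)
{
	if (unlikely(buf->pagecnt_bias == 1)) {
		page_ref_add(buf->page, USHRT_MAX - 1);
		buf->pagecnt_bias = USHRT_MAX;
	}
}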
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe.h')
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe.h | 7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8167e77b924f..d765db6bd8b2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -195,7 +195,12 @@ struct ixgbe_rx_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
 	struct page *page;
-	unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
 };
 
 struct ixgbe_queue_stats {