author		Kevin Hao <haokexin@gmail.com>		2021-02-04 18:56:36 +0800
committer	Jakub Kicinski <kuba@kernel.org>	2021-02-06 11:57:28 -0800
commit		3f6e687dff395da43b056c18150a423bc7bf5d14 (patch)
tree		f569d71fb1d091a294c3a129bb08bb42c316b1f5 /net/core
parent		b358e2122b9d7aa99f681d4edfafd999845d16ff (diff)
net: Introduce {netdev,napi}_alloc_frag_align()
In the current implementation of {netdev,napi}_alloc_frag(), there is no
alignment guarantee for the returned buffer address. But some hardware does
require the DMA buffer to be correctly aligned, so if the buffers allocated
by {netdev,napi}_alloc_frag() are handed to such hardware for DMA, we would
have to resort to workarounds like:

    buf = napi_alloc_frag(really_needed_size + align);
    buf = PTR_ALIGN(buf, align);

This code looks ugly and wastes a lot of memory when the buffers are used by
a network driver for TX/RX. We have already added alignment support to the
page_frag functions, so add the corresponding {netdev,napi}_alloc_frag_align()
functions.

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
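For a driver author, the new helpers replace the over-allocate-and-align
pattern one for one. A minimal before/after sketch for a hypothetical RX
refill path (rx_buf_size and RX_DMA_ALIGN are illustrative names, not part
of this patch; the requested alignment must be a power of two):

    /* Before: over-allocate, then align by hand; up to
     * RX_DMA_ALIGN - 1 bytes per buffer are wasted.
     */
    buf = napi_alloc_frag(rx_buf_size + RX_DMA_ALIGN);
    buf = PTR_ALIGN(buf, RX_DMA_ALIGN);

    /* After: ask the allocator for an aligned fragment directly. */
    buf = napi_alloc_frag_align(rx_buf_size, RX_DMA_ALIGN);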
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/skbuff.c	26
1 files changed, 10 insertions, 16 deletions
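Note: the diffstat above is limited to net/core, so the inline wrappers this
commit adds in include/linux/skbuff.h are not shown below. Given the
align_mask convention used by page_frag_alloc_align() (the fragment offset is
masked with align_mask), the NAPI-side wrappers plausibly look like the
following sketch; treat it as illustrative rather than a verbatim quote of
the header change:

    static inline void *napi_alloc_frag(unsigned int fragsz)
    {
            /* ~0u keeps every offset bit, i.e. no extra alignment */
            return __napi_alloc_frag_align(fragsz, ~0u);
    }

    static inline void *napi_alloc_frag_align(unsigned int fragsz,
                                              unsigned int align)
    {
            /* -align == ~(align - 1) when align is a power of two */
            WARN_ON_ONCE(!is_power_of_2(align));
            return __napi_alloc_frag_align(fragsz, -align);
    }

netdev_alloc_frag() and netdev_alloc_frag_align() would wrap
__netdev_alloc_frag_align() in the same way.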
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3787093239f5..d380c7b5a12d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -374,29 +374,23 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 
-static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
+				unsigned int align_mask)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
+	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
 }
 
-void *napi_alloc_frag(unsigned int fragsz)
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
 	fragsz = SKB_DATA_ALIGN(fragsz);
 
-	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
+	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
 }
-EXPORT_SYMBOL(napi_alloc_frag);
+EXPORT_SYMBOL(__napi_alloc_frag_align);
 
-/**
- * netdev_alloc_frag - allocate a page fragment
- * @fragsz: fragment size
- *
- * Allocates a frag from a page for receive buffer.
- * Uses GFP_ATOMIC allocations.
- */
-void *netdev_alloc_frag(unsigned int fragsz)
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
 	struct page_frag_cache *nc;
 	void *data;
@@ -404,15 +398,15 @@ void *netdev_alloc_frag(unsigned int fragsz)
 	fragsz = SKB_DATA_ALIGN(fragsz);
 	if (in_irq() || irqs_disabled()) {
 		nc = this_cpu_ptr(&netdev_alloc_cache);
-		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
+		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
 	} else {
 		local_bh_disable();
-		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
+		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
 		local_bh_enable();
 	}
 	return data;
 }
-EXPORT_SYMBOL(netdev_alloc_frag);
+EXPORT_SYMBOL(__netdev_alloc_frag_align);
 
 /**
  * __netdev_alloc_skb - allocate an skbuff for rx on a specific device