 include/linux/skbuff.h | 36 ++++++++++++++++++++++++++++++++++--
 net/core/skbuff.c      | 26 ++++++++++----------------
 2 files changed, 44 insertions(+), 18 deletions(-)
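
Before reading the hunks, the align_mask convention is worth spelling out: netdev_alloc_frag_align() passes -align as the mask, while the plain netdev_alloc_frag() passes ~0u. The snippet below is a minimal userspace illustration, not part of the patch; it assumes page_frag_alloc_align() (added by the companion mm change) applies the mask as "offset &= align_mask", and the align_offset() helper is hypothetical.

#include <stdio.h>

/* Hypothetical stand-in for the masking step that page_frag_alloc_align()
 * is assumed to perform on its internal page offset. */
static unsigned int align_offset(unsigned int offset, unsigned int align_mask)
{
	return offset & align_mask;
}

int main(void)
{
	unsigned int align = 64;	/* must be a power of two */

	/* For a power-of-two align, -align == ~(align - 1): a mask that
	 * clears the low log2(align) bits. */
	printf("-align = %#x\n", -align);			/* 0xffffffc0 */

	/* ~0u clears no bits, so netdev_alloc_frag() keeps its old,
	 * unaligned behaviour. */
	printf("1000 & ~0u    = %u\n", align_offset(1000, ~0u));	/* 1000 */
	printf("1000 & -align = %u\n", align_offset(1000, -align));	/* 960 */
	return 0;
}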
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0e42c53b8ca9..0a4e91a2f873 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2818,7 +2818,26 @@ void skb_queue_purge(struct sk_buff_head *list);
 
 unsigned int skb_rbtree_purge(struct rb_root *root);
 
-void *netdev_alloc_frag(unsigned int fragsz);
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+static inline void *netdev_alloc_frag(unsigned int fragsz)
+{
+	return __netdev_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *netdev_alloc_frag_align(unsigned int fragsz,
+					    unsigned int align)
+{
+	WARN_ON_ONCE(!is_power_of_2(align));
+	return __netdev_alloc_frag_align(fragsz, -align);
+}
 
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
 				   gfp_t gfp_mask);
@@ -2877,7 +2896,20 @@ static inline void skb_free_frag(void *addr)
 	page_frag_free(addr);
 }
 
-void *napi_alloc_frag(unsigned int fragsz);
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+static inline void *napi_alloc_frag(unsigned int fragsz)
+{
+	return __napi_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *napi_alloc_frag_align(unsigned int fragsz,
+					  unsigned int align)
+{
+	WARN_ON_ONCE(!is_power_of_2(align));
+	return __napi_alloc_frag_align(fragsz, -align);
+}
+
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
 				 unsigned int length, gfp_t gfp_mask);
 static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3787093239f5..d380c7b5a12d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -374,29 +374,23 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 
-static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
+				unsigned int align_mask)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
+	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
 }
 
-void *napi_alloc_frag(unsigned int fragsz)
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
 	fragsz = SKB_DATA_ALIGN(fragsz);
 
-	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
+	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
 }
-EXPORT_SYMBOL(napi_alloc_frag);
+EXPORT_SYMBOL(__napi_alloc_frag_align);
 
-/**
- * netdev_alloc_frag - allocate a page fragment
- * @fragsz: fragment size
- *
- * Allocates a frag from a page for receive buffer.
- * Uses GFP_ATOMIC allocations.
- */
-void *netdev_alloc_frag(unsigned int fragsz)
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
 	struct page_frag_cache *nc;
 	void *data;
@@ -404,15 +398,15 @@ void *netdev_alloc_frag(unsigned int fragsz)
 	fragsz = SKB_DATA_ALIGN(fragsz);
 	if (in_irq() || irqs_disabled()) {
 		nc = this_cpu_ptr(&netdev_alloc_cache);
-		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
+		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
 	} else {
 		local_bh_disable();
-		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
+		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
 		local_bh_enable();
 	}
 	return data;
 }
-EXPORT_SYMBOL(netdev_alloc_frag);
+EXPORT_SYMBOL(__netdev_alloc_frag_align);
 
 /**
  * __netdev_alloc_skb -	allocate an skbuff for rx on a specific device
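
For reference, a hedged sketch of how a driver might use the new helpers once this patch is applied. The function example_rx_buf_alloc() and the size/alignment constants are invented for illustration; only netdev_alloc_frag(), netdev_alloc_frag_align() and napi_alloc_frag_align() come from the patch.

#include <linux/skbuff.h>

#define EXAMPLE_RX_BUF_SIZE	2048
#define EXAMPLE_RX_BUF_ALIGN	128	/* must be a power of two; non-power-of-two
					 * values trip the WARN_ON_ONCE() above */

/* Hypothetical RX buffer allocation for hardware that requires
 * 128-byte-aligned receive buffers. */
static void *example_rx_buf_alloc(bool need_align)
{
	if (need_align)
		/* The offset inside the per-CPU page frag cache is rounded
		 * down to an EXAMPLE_RX_BUF_ALIGN boundary. */
		return netdev_alloc_frag_align(EXAMPLE_RX_BUF_SIZE,
					       EXAMPLE_RX_BUF_ALIGN);

	/* Same behaviour as before the patch: SKB_DATA_ALIGN() only. */
	return netdev_alloc_frag(EXAMPLE_RX_BUF_SIZE);
}

napi_alloc_frag_align() is the same idea for NAPI context: per the skbuff.c hunk it uses napi_alloc_cache directly without disabling bottom halves, whereas __netdev_alloc_frag_align() checks in_irq()/irqs_disabled() and wraps the softirq path in local_bh_disable()/local_bh_enable(), so the netdev variant is callable from any context.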