From 948d4f214fde43743c57aae0c708bff44f6345f2 Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Fri, 3 Aug 2018 16:58:10 +0900
Subject: veth: Add driver XDP

This is the basic implementation of veth driver XDP.

Incoming packets are sent from the peer veth device in the form of skb,
so this is generally doing the same thing as generic XDP.

This itself is not so useful, but is a starting point for implementing
other useful veth XDP features like TX and REDIRECT.

This introduces NAPI when XDP is enabled, because XDP now relies heavily
on the NAPI context. Use ptr_ring to emulate the NIC ring. The Tx function
enqueues packets to the ring and the peer's NAPI handler drains the ring.

Currently only one ring is allocated for each veth device, so it does not
scale in a multiqueue environment. This can be resolved by allocating
rings on a per-queue basis later.

Note that netif_rx is used instead of NAPI when XDP is not loaded, so this
does not change the default behaviour.

v6:
- Check skb->len only when allocation is needed.
- Add __GFP_NOWARN to alloc_page() as it can be triggered by external events.

v3:
- Fix race on closing the device.
- Add extack messages in ndo_bpf.

v2:
- Squashed with the patch adding NAPI.
- Implement adjust_tail.
- Don't acquire consumer lock because it is guarded by NAPI.
- Make poll_controller noop since it is unnecessary.
- Register rxq_info on enabling XDP rather than on opening the device.

Signed-off-by: Toshiaki Makita
Signed-off-by: Daniel Borkmann
---
 drivers/net/veth.c | 374 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 367 insertions(+), 7 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a69ad39ee57e..d3b9f10bea24 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -19,10 +19,18 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
+#include

 #define DRV_NAME "veth"
 #define DRV_VERSION "1.0"

+#define VETH_RING_SIZE 256
+#define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
+
 struct pcpu_vstats {
 	u64 packets;
 	u64 bytes;
@@ -30,9 +38,16 @@ struct pcpu_vstats {
 };

 struct veth_priv {
+	struct napi_struct xdp_napi;
+	struct net_device *dev;
+	struct bpf_prog __rcu *xdp_prog;
+	struct bpf_prog *_xdp_prog;
 	struct net_device __rcu *peer;
 	atomic64_t dropped;
 	unsigned requested_headroom;
+	bool rx_notify_masked;
+	struct ptr_ring xdp_ring;
+	struct xdp_rxq_info xdp_rxq;
 };

 /*
@@ -98,11 +113,43 @@ static const struct ethtool_ops veth_ethtool_ops = {
 	.get_link_ksettings = veth_get_link_ksettings,
 };

-static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+/* general routines */
+
+static void __veth_xdp_flush(struct veth_priv *priv)
+{
+	/* Write ptr_ring before reading rx_notify_masked */
+	smp_mb();
+	if (!priv->rx_notify_masked) {
+		priv->rx_notify_masked = true;
+		napi_schedule(&priv->xdp_napi);
+	}
+}
+
+static int veth_xdp_rx(struct veth_priv *priv, struct sk_buff *skb)
+{
+	if (unlikely(ptr_ring_produce(&priv->xdp_ring, skb))) {
+		dev_kfree_skb_any(skb);
+		return NET_RX_DROP;
+	}
+
+	return NET_RX_SUCCESS;
+}
+
+static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb, bool xdp)
 {
 	struct veth_priv *priv = netdev_priv(dev);
+
+	return __dev_forward_skb(dev, skb) ?: xdp ?
+ veth_xdp_rx(priv, skb) : + netif_rx(skb); +} + +static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct net_device *rcv; int length = skb->len; + bool rcv_xdp = false; rcu_read_lock(); rcv = rcu_dereference(priv->peer); @@ -111,7 +158,10 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; } - if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) { + rcv_priv = netdev_priv(rcv); + rcv_xdp = rcu_access_pointer(rcv_priv->xdp_prog); + + if (likely(veth_forward_skb(rcv, skb, rcv_xdp) == NET_RX_SUCCESS)) { struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats); u64_stats_update_begin(&stats->syncp); @@ -122,14 +172,15 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) drop: atomic64_inc(&priv->dropped); } + + if (rcv_xdp) + __veth_xdp_flush(rcv_priv); + rcu_read_unlock(); + return NETDEV_TX_OK; } -/* - * general routines - */ - static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); @@ -179,18 +230,254 @@ static void veth_set_multicast_list(struct net_device *dev) { } +static struct sk_buff *veth_build_skb(void *head, int headroom, int len, + int buflen) +{ + struct sk_buff *skb; + + if (!buflen) { + buflen = SKB_DATA_ALIGN(headroom + len) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + } + skb = build_skb(head, buflen); + if (!skb) + return NULL; + + skb_reserve(skb, headroom); + skb_put(skb, len); + + return skb; +} + +static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, + struct sk_buff *skb) +{ + u32 pktlen, headroom, act, metalen; + void *orig_data, *orig_data_end; + struct bpf_prog *xdp_prog; + int mac_len, delta, off; + struct xdp_buff xdp; + + rcu_read_lock(); + xdp_prog = rcu_dereference(priv->xdp_prog); + if (unlikely(!xdp_prog)) { + rcu_read_unlock(); + goto out; + } + + mac_len = skb->data - skb_mac_header(skb); + pktlen = skb->len + mac_len; + headroom = skb_headroom(skb) - mac_len; + + if (skb_shared(skb) || skb_head_is_locked(skb) || + skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) { + struct sk_buff *nskb; + int size, head_off; + void *head, *start; + struct page *page; + + size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + if (size > PAGE_SIZE) + goto drop; + + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + if (!page) + goto drop; + + head = page_address(page); + start = head + VETH_XDP_HEADROOM; + if (skb_copy_bits(skb, -mac_len, start, pktlen)) { + page_frag_free(head); + goto drop; + } + + nskb = veth_build_skb(head, + VETH_XDP_HEADROOM + mac_len, skb->len, + PAGE_SIZE); + if (!nskb) { + page_frag_free(head); + goto drop; + } + + skb_copy_header(nskb, skb); + head_off = skb_headroom(nskb) - skb_headroom(skb); + skb_headers_offset_update(nskb, head_off); + if (skb->sk) + skb_set_owner_w(nskb, skb->sk); + consume_skb(skb); + skb = nskb; + } + + xdp.data_hard_start = skb->head; + xdp.data = skb_mac_header(skb); + xdp.data_end = xdp.data + pktlen; + xdp.data_meta = xdp.data; + xdp.rxq = &priv->xdp_rxq; + orig_data = xdp.data; + orig_data_end = xdp.data_end; + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + + switch (act) { + case XDP_PASS: + break; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(priv->dev, xdp_prog, act); + case XDP_DROP: + goto drop; + } + rcu_read_unlock(); + + delta = orig_data - xdp.data; + off = mac_len + delta; + if (off > 0) + 
__skb_push(skb, off); + else if (off < 0) + __skb_pull(skb, -off); + skb->mac_header -= delta; + off = xdp.data_end - orig_data_end; + if (off != 0) + __skb_put(skb, off); + skb->protocol = eth_type_trans(skb, priv->dev); + + metalen = xdp.data - xdp.data_meta; + if (metalen) + skb_metadata_set(skb, metalen); +out: + return skb; +drop: + rcu_read_unlock(); + kfree_skb(skb); + return NULL; +} + +static int veth_xdp_rcv(struct veth_priv *priv, int budget) +{ + int i, done = 0; + + for (i = 0; i < budget; i++) { + struct sk_buff *skb = __ptr_ring_consume(&priv->xdp_ring); + + if (!skb) + break; + + skb = veth_xdp_rcv_skb(priv, skb); + + if (skb) + napi_gro_receive(&priv->xdp_napi, skb); + + done++; + } + + return done; +} + +static int veth_poll(struct napi_struct *napi, int budget) +{ + struct veth_priv *priv = + container_of(napi, struct veth_priv, xdp_napi); + int done; + + done = veth_xdp_rcv(priv, budget); + + if (done < budget && napi_complete_done(napi, done)) { + /* Write rx_notify_masked before reading ptr_ring */ + smp_store_mb(priv->rx_notify_masked, false); + if (unlikely(!__ptr_ring_empty(&priv->xdp_ring))) { + priv->rx_notify_masked = true; + napi_schedule(&priv->xdp_napi); + } + } + + return done; +} + +static int veth_napi_add(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + int err; + + err = ptr_ring_init(&priv->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); + if (err) + return err; + + netif_napi_add(dev, &priv->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); + napi_enable(&priv->xdp_napi); + + return 0; +} + +static void veth_napi_del(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + + napi_disable(&priv->xdp_napi); + netif_napi_del(&priv->xdp_napi); + priv->rx_notify_masked = false; + ptr_ring_cleanup(&priv->xdp_ring, __skb_array_destroy_skb); +} + +static int veth_enable_xdp(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + int err; + + if (!xdp_rxq_info_is_reg(&priv->xdp_rxq)) { + err = xdp_rxq_info_reg(&priv->xdp_rxq, dev, 0); + if (err < 0) + return err; + + err = xdp_rxq_info_reg_mem_model(&priv->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err < 0) + goto err; + + err = veth_napi_add(dev); + if (err) + goto err; + } + + rcu_assign_pointer(priv->xdp_prog, priv->_xdp_prog); + + return 0; +err: + xdp_rxq_info_unreg(&priv->xdp_rxq); + + return err; +} + +static void veth_disable_xdp(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + + rcu_assign_pointer(priv->xdp_prog, NULL); + veth_napi_del(dev); + xdp_rxq_info_unreg(&priv->xdp_rxq); +} + static int veth_open(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); + int err; if (!peer) return -ENOTCONN; + if (priv->_xdp_prog) { + err = veth_enable_xdp(dev); + if (err) + return err; + } + if (peer->flags & IFF_UP) { netif_carrier_on(dev); netif_carrier_on(peer); } + return 0; } @@ -203,6 +490,9 @@ static int veth_close(struct net_device *dev) if (peer) netif_carrier_off(peer); + if (priv->_xdp_prog) + veth_disable_xdp(dev); + return 0; } @@ -228,7 +518,7 @@ static void veth_dev_free(struct net_device *dev) static void veth_poll_controller(struct net_device *dev) { /* veth only receives frames when its peer sends one - * Since it's a synchronous operation, we are guaranteed + * Since it has nothing to do with disabling irqs, we are guaranteed * never to have pending data when we poll for it so * there is nothing to do here. 
* @@ -276,6 +566,72 @@ out: rcu_read_unlock(); } +static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct veth_priv *priv = netdev_priv(dev); + struct bpf_prog *old_prog; + struct net_device *peer; + int err; + + old_prog = priv->_xdp_prog; + priv->_xdp_prog = prog; + peer = rtnl_dereference(priv->peer); + + if (prog) { + if (!peer) { + NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached"); + err = -ENOTCONN; + goto err; + } + + if (dev->flags & IFF_UP) { + err = veth_enable_xdp(dev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed"); + goto err; + } + } + } + + if (old_prog) { + if (!prog && dev->flags & IFF_UP) + veth_disable_xdp(dev); + bpf_prog_put(old_prog); + } + + return 0; +err: + priv->_xdp_prog = old_prog; + + return err; +} + +static u32 veth_xdp_query(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + const struct bpf_prog *xdp_prog; + + xdp_prog = priv->_xdp_prog; + if (xdp_prog) + return xdp_prog->aux->id; + + return 0; +} + +static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return veth_xdp_set(dev, xdp->prog, xdp->extack); + case XDP_QUERY_PROG: + xdp->prog_id = veth_xdp_query(dev); + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops veth_netdev_ops = { .ndo_init = veth_dev_init, .ndo_open = veth_open, @@ -290,6 +646,7 @@ static const struct net_device_ops veth_netdev_ops = { .ndo_get_iflink = veth_get_iflink, .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = veth_set_rx_headroom, + .ndo_bpf = veth_xdp, }; #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \ @@ -451,10 +808,13 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, */ priv = netdev_priv(dev); + priv->dev = dev; rcu_assign_pointer(priv->peer, peer); priv = netdev_priv(peer); + priv->dev = peer; rcu_assign_pointer(priv->peer, dev); + return 0; err_register_dev: -- cgit v1.2.3 From dc2248220a4aa61560c95aca98d4162095bd7e8a Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 3 Aug 2018 16:58:11 +0900 Subject: veth: Avoid drops by oversized packets when XDP is enabled Oversized packets including GSO packets can be dropped if XDP is enabled on receiver side, so don't send such packets from peer. Drop TSO and SCTP fragmentation features so that veth devices themselves segment packets with XDP enabled. Also cap MTU accordingly. v4: - Don't auto-adjust MTU but cap max MTU. 
Signed-off-by: Toshiaki Makita
Signed-off-by: Daniel Borkmann
---
 drivers/net/veth.c | 47 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 45 insertions(+), 2 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index d3b9f10bea24..9edf104739da 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -543,6 +543,23 @@ static int veth_get_iflink(const struct net_device *dev)
 	return iflink;
 }

+static netdev_features_t veth_fix_features(struct net_device *dev,
+					   netdev_features_t features)
+{
+	struct veth_priv *priv = netdev_priv(dev);
+	struct net_device *peer;
+
+	peer = rtnl_dereference(priv->peer);
+	if (peer) {
+		struct veth_priv *peer_priv = netdev_priv(peer);
+
+		if (peer_priv->_xdp_prog)
+			features &= ~NETIF_F_GSO_SOFTWARE;
+	}
+
+	return features;
+}
+
 static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
 {
 	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
@@ -572,6 +589,7 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	struct veth_priv *priv = netdev_priv(dev);
 	struct bpf_prog *old_prog;
 	struct net_device *peer;
+	unsigned int max_mtu;
 	int err;

 	old_prog = priv->_xdp_prog;
@@ -585,6 +603,15 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 			goto err;
 		}

+		max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
+			  peer->hard_header_len -
+			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		if (peer->mtu > max_mtu) {
+			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
+			err = -ERANGE;
+			goto err;
+		}
+
 		if (dev->flags & IFF_UP) {
 			err = veth_enable_xdp(dev);
 			if (err) {
@@ -592,14 +619,29 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 				goto err;
 			}
 		}
+
+		if (!old_prog) {
+			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
+			peer->max_mtu = max_mtu;
+		}
 	}

 	if (old_prog) {
-		if (!prog && dev->flags & IFF_UP)
-			veth_disable_xdp(dev);
+		if (!prog) {
+			if (dev->flags & IFF_UP)
+				veth_disable_xdp(dev);
+
+			if (peer) {
+				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
+				peer->max_mtu = ETH_MAX_MTU;
+			}
+		}
 		bpf_prog_put(old_prog);
 	}

+	if ((!!old_prog ^ !!prog) && peer)
+		netdev_update_features(peer);
+
 	return 0;
 err:
 	priv->_xdp_prog = old_prog;
@@ -644,6 +686,7 @@ static const struct net_device_ops veth_netdev_ops = {
 	.ndo_poll_controller = veth_poll_controller,
 #endif
 	.ndo_get_iflink = veth_get_iflink,
+	.ndo_fix_features = veth_fix_features,
 	.ndo_features_check = passthru_features_check,
 	.ndo_set_rx_headroom = veth_set_rx_headroom,
 	.ndo_bpf = veth_xdp,
-- cgit v1.2.3

From 9fc8d518d9d590998209f2686e026a488f65d41e Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Fri, 3 Aug 2018 16:58:13 +0900
Subject: veth: Handle xdp_frames in xdp napi ring

This is preparation for XDP TX and ndo_xdp_xmit.

This allows the napi handler to handle xdp_frames through the xdp ring as
well as sk_buffs.

v8:
- Don't use the xdp_frame pointer address to calculate skb->head and
  headroom.

v7:
- Use xdp_scrub_frame() instead of memset().

v3:
- Revert the v2 change around rings and use a flag to differentiate skb
  and xdp_frame, since bulk skb xmit makes little performance difference
  for now.

v2:
- Use another ring instead of using a flag to differentiate skb and
  xdp_frame. This approach makes bulk skb transmit possible in veth_xmit
  later.
- Clear xdp_frame fields in skb->head.
- Implement adjust_tail.
Signed-off-by: Toshiaki Makita Acked-by: John Fastabend Acked-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann --- drivers/net/veth.c | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 84 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 9edf104739da..89f3059e603d 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -22,12 +22,12 @@ #include #include #include -#include #include #define DRV_NAME "veth" #define DRV_VERSION "1.0" +#define VETH_XDP_FLAG BIT(0) #define VETH_RING_SIZE 256 #define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN) @@ -115,6 +115,24 @@ static const struct ethtool_ops veth_ethtool_ops = { /* general routines */ +static bool veth_is_xdp_frame(void *ptr) +{ + return (unsigned long)ptr & VETH_XDP_FLAG; +} + +static void *veth_ptr_to_xdp(void *ptr) +{ + return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG); +} + +static void veth_ptr_free(void *ptr) +{ + if (veth_is_xdp_frame(ptr)) + xdp_return_frame(veth_ptr_to_xdp(ptr)); + else + kfree_skb(ptr); +} + static void __veth_xdp_flush(struct veth_priv *priv) { /* Write ptr_ring before reading rx_notify_masked */ @@ -249,6 +267,63 @@ static struct sk_buff *veth_build_skb(void *head, int headroom, int len, return skb; } +static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, + struct xdp_frame *frame) +{ + void *hard_start = frame->data - frame->headroom; + void *head = hard_start - sizeof(struct xdp_frame); + int len = frame->len, delta = 0; + struct bpf_prog *xdp_prog; + unsigned int headroom; + struct sk_buff *skb; + + rcu_read_lock(); + xdp_prog = rcu_dereference(priv->xdp_prog); + if (likely(xdp_prog)) { + struct xdp_buff xdp; + u32 act; + + xdp.data_hard_start = hard_start; + xdp.data = frame->data; + xdp.data_end = frame->data + frame->len; + xdp.data_meta = frame->data - frame->metasize; + xdp.rxq = &priv->xdp_rxq; + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + + switch (act) { + case XDP_PASS: + delta = frame->data - xdp.data; + len = xdp.data_end - xdp.data; + break; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(priv->dev, xdp_prog, act); + case XDP_DROP: + goto err_xdp; + } + } + rcu_read_unlock(); + + headroom = sizeof(struct xdp_frame) + frame->headroom - delta; + skb = veth_build_skb(head, headroom, len, 0); + if (!skb) { + xdp_return_frame(frame); + goto err; + } + + xdp_scrub_frame(frame); + skb->protocol = eth_type_trans(skb, priv->dev); +err: + return skb; +err_xdp: + rcu_read_unlock(); + xdp_return_frame(frame); + + return NULL; +} + static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, struct sk_buff *skb) { @@ -359,12 +434,16 @@ static int veth_xdp_rcv(struct veth_priv *priv, int budget) int i, done = 0; for (i = 0; i < budget; i++) { - struct sk_buff *skb = __ptr_ring_consume(&priv->xdp_ring); + void *ptr = __ptr_ring_consume(&priv->xdp_ring); + struct sk_buff *skb; - if (!skb) + if (!ptr) break; - skb = veth_xdp_rcv_skb(priv, skb); + if (veth_is_xdp_frame(ptr)) + skb = veth_xdp_rcv_one(priv, veth_ptr_to_xdp(ptr)); + else + skb = veth_xdp_rcv_skb(priv, ptr); if (skb) napi_gro_receive(&priv->xdp_napi, skb); @@ -417,7 +496,7 @@ static void veth_napi_del(struct net_device *dev) napi_disable(&priv->xdp_napi); netif_napi_del(&priv->xdp_napi); priv->rx_notify_masked = false; - ptr_ring_cleanup(&priv->xdp_ring, __skb_array_destroy_skb); + ptr_ring_cleanup(&priv->xdp_ring, veth_ptr_free); } static int veth_enable_xdp(struct net_device *dev) -- 
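cgit v1.2.3

The pointer tagging introduced above (veth_is_xdp_frame() and friends) works
because sk_buff and xdp_frame pointers are at least word-aligned, so bit 0 is
always zero and is free to carry a type tag while the entry sits in the
ptr_ring. Below is a minimal, self-contained userspace sketch of the same
technique; the mock struct names are illustrative stand-ins, not the kernel's
types:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define XDP_FLAG 0x1UL			/* mirrors VETH_XDP_FLAG (BIT(0)) */

struct mock_xdp_frame { int len; };	/* stand-in for struct xdp_frame */
struct mock_sk_buff { int len; };	/* stand-in for struct sk_buff */

/* Producer side: tag an xdp_frame pointer before enqueueing it. */
static void *xdp_to_ptr(struct mock_xdp_frame *frame)
{
	return (void *)((uintptr_t)frame | XDP_FLAG);
}

/* Consumer side: test bit 0 to learn the entry's type... */
static int is_xdp_frame(void *ptr)
{
	return (uintptr_t)ptr & XDP_FLAG;
}

/* ...and strip the tag to recover the original pointer. */
static struct mock_xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct mock_xdp_frame *)((uintptr_t)ptr & ~XDP_FLAG);
}

int main(void)
{
	struct mock_xdp_frame *frame = malloc(sizeof(*frame));
	struct mock_sk_buff *skb = malloc(sizeof(*skb));
	void *ring[2] = { xdp_to_ptr(frame), skb };	/* mixed-type "ring" */
	int i;

	/* malloc() returns suitably aligned memory, so bit 0 is unused. */
	assert(((uintptr_t)frame & XDP_FLAG) == 0);

	for (i = 0; i < 2; i++)
		printf("entry %d holds %s\n", i,
		       is_xdp_frame(ring[i]) ? "an xdp_frame" : "an sk_buff");

	free(ptr_to_xdp(ring[0]));
	free(skb);
	return 0;
}

Keeping both types in one ring, as the patch does, preserves the arrival
order of skbs and xdp_frames through the NAPI handler; the v3 note above
explains why a single tagged ring was preferred over two rings.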
From af87a3aa1b5f397a2f5c99b97b000943c5177da7 Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Fri, 3 Aug 2018 16:58:14 +0900
Subject: veth: Add ndo_xdp_xmit

This allows the NIC's XDP to redirect packets to veth. The destination
veth device enqueues redirected packets to the napi ring of its peer,
and they are then processed by XDP on the peer veth device.
This can be thought of as one XDP program calling another via REDIRECT,
when the peer enables driver XDP.

Note that when the peer veth device has no driver XDP program set,
redirected packets will be dropped because the peer is not ready for NAPI.

v4:
- Don't use xdp_ok_fwd_dev() because checking IFF_UP is not necessary.
  Add comments about it and check only MTU.

v2:
- Drop the part converting xdp_frame into skb when XDP is not enabled.
- Implement bulk interface of ndo_xdp_xmit.
- Implement XDP_XMIT_FLUSH bit and drop ndo_xdp_flush.

Signed-off-by: Toshiaki Makita
Acked-by: John Fastabend
Signed-off-by: Daniel Borkmann
---
 drivers/net/veth.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)

(limited to 'drivers/net')

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 89f3059e603d..dbb693a7795e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -125,6 +126,11 @@ static void *veth_ptr_to_xdp(void *ptr)
 	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
 }

+static void *veth_xdp_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
+}
+
 static void veth_ptr_free(void *ptr)
 {
 	if (veth_is_xdp_frame(ptr))
@@ -267,6 +273,50 @@ static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
 	return skb;
 }

+static int veth_xdp_xmit(struct net_device *dev, int n,
+			 struct xdp_frame **frames, u32 flags)
+{
+	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+	struct net_device *rcv;
+	unsigned int max_len;
+	int i, drops = 0;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	rcv = rcu_dereference(priv->peer);
+	if (unlikely(!rcv))
+		return -ENXIO;
+
+	rcv_priv = netdev_priv(rcv);
+	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
+	 * side. This means an XDP program is loaded on the peer and the peer
+	 * device is up.
+	 */
+	if (!rcu_access_pointer(rcv_priv->xdp_prog))
+		return -ENXIO;
+
+	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
+
+	spin_lock(&rcv_priv->xdp_ring.producer_lock);
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *frame = frames[i];
+		void *ptr = veth_xdp_to_ptr(frame);
+
+		if (unlikely(frame->len > max_len ||
+			     __ptr_ring_produce(&rcv_priv->xdp_ring, ptr))) {
+			xdp_return_frame_rx_napi(frame);
+			drops++;
+		}
+	}
+	spin_unlock(&rcv_priv->xdp_ring.producer_lock);
+
+	if (flags & XDP_XMIT_FLUSH)
+		__veth_xdp_flush(rcv_priv);
+
+	return n - drops;
+}
+
 static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv,
 					struct xdp_frame *frame)
 {
@@ -769,6 +819,7 @@ static const struct net_device_ops veth_netdev_ops = {
 	.ndo_features_check = passthru_features_check,
 	.ndo_set_rx_headroom = veth_set_rx_headroom,
 	.ndo_bpf = veth_xdp,
+	.ndo_xdp_xmit = veth_xdp_xmit,
 };

 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
-- cgit v1.2.3

From d1396004dd868642ea2596abe058d96dcf97990f Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Fri, 3 Aug 2018 16:58:17 +0900
Subject: veth: Add XDP TX and REDIRECT

This allows further redirection of xdp_frames like

 NIC -> veth--veth -> veth--veth
 (XDP)   (XDP)        (XDP)

The intermediate XDP, redirecting packets from the NIC to the other veth,
reuses xdp_mem_info from the NIC so that page recycling of the NIC works
on the destination veth's XDP.
In this way return_frame is not fully guarded by NAPI, since another NAPI
handler on another cpu may use the same xdp_mem_info concurrently.
Thus, disable napi_direct via xdp_set_return_frame_no_direct() during the
NAPI context.

v8:
- Don't use the xdp_frame pointer address for data_hard_start of xdp_buff.

v4:
- Use xdp_[set|clear]_return_frame_no_direct() instead of a flag in
  xdp_mem_info.

v3:
- Fix double free when veth_xdp_tx() returns a positive value.
- Convert xdp_xmit and xdp_redir variables into flags.

Signed-off-by: Toshiaki Makita
Signed-off-by: Daniel Borkmann
---
 drivers/net/veth.c | 119 +++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 110 insertions(+), 9 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index dbb693a7795e..9b0a7b9ab1b8 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -32,6 +32,10 @@
 #define VETH_RING_SIZE 256
 #define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)

+/* Separating two types of XDP xmit */
+#define VETH_XDP_TX BIT(0)
+#define VETH_XDP_REDIR BIT(1)
+
 struct pcpu_vstats {
 	u64 packets;
 	u64 bytes;
@@ -45,6 +49,7 @@ struct veth_priv {
 	struct bpf_prog *_xdp_prog;
 	struct net_device __rcu *peer;
 	atomic64_t dropped;
+	struct xdp_mem_info xdp_mem;
 	unsigned requested_headroom;
 	bool rx_notify_masked;
 	struct ptr_ring xdp_ring;
@@ -317,12 +322,44 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 	return n - drops;
 }

+static void veth_xdp_flush(struct net_device *dev)
+{
+	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+	struct net_device *rcv;
+
+	rcu_read_lock();
+	rcv = rcu_dereference(priv->peer);
+	if (unlikely(!rcv))
+		goto out;
+
+	rcv_priv = netdev_priv(rcv);
+	/* xdp_ring is initialized on receive side?
*/ + if (unlikely(!rcu_access_pointer(rcv_priv->xdp_prog))) + goto out; + + __veth_xdp_flush(rcv_priv); +out: + rcu_read_unlock(); +} + +static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) +{ + struct xdp_frame *frame = convert_to_xdp_frame(xdp); + + if (unlikely(!frame)) + return -EOVERFLOW; + + return veth_xdp_xmit(dev, 1, &frame, 0); +} + static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, - struct xdp_frame *frame) + struct xdp_frame *frame, + unsigned int *xdp_xmit) { void *hard_start = frame->data - frame->headroom; void *head = hard_start - sizeof(struct xdp_frame); int len = frame->len, delta = 0; + struct xdp_frame orig_frame; struct bpf_prog *xdp_prog; unsigned int headroom; struct sk_buff *skb; @@ -346,6 +383,29 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, delta = frame->data - xdp.data; len = xdp.data_end - xdp.data; break; + case XDP_TX: + orig_frame = *frame; + xdp.data_hard_start = head; + xdp.rxq->mem = frame->mem; + if (unlikely(veth_xdp_tx(priv->dev, &xdp) < 0)) { + trace_xdp_exception(priv->dev, xdp_prog, act); + frame = &orig_frame; + goto err_xdp; + } + *xdp_xmit |= VETH_XDP_TX; + rcu_read_unlock(); + goto xdp_xmit; + case XDP_REDIRECT: + orig_frame = *frame; + xdp.data_hard_start = head; + xdp.rxq->mem = frame->mem; + if (xdp_do_redirect(priv->dev, &xdp, xdp_prog)) { + frame = &orig_frame; + goto err_xdp; + } + *xdp_xmit |= VETH_XDP_REDIR; + rcu_read_unlock(); + goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: @@ -370,12 +430,13 @@ err: err_xdp: rcu_read_unlock(); xdp_return_frame(frame); - +xdp_xmit: return NULL; } static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int *xdp_xmit) { u32 pktlen, headroom, act, metalen; void *orig_data, *orig_data_end; @@ -447,6 +508,26 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, switch (act) { case XDP_PASS: break; + case XDP_TX: + get_page(virt_to_page(xdp.data)); + consume_skb(skb); + xdp.rxq->mem = priv->xdp_mem; + if (unlikely(veth_xdp_tx(priv->dev, &xdp) < 0)) { + trace_xdp_exception(priv->dev, xdp_prog, act); + goto err_xdp; + } + *xdp_xmit |= VETH_XDP_TX; + rcu_read_unlock(); + goto xdp_xmit; + case XDP_REDIRECT: + get_page(virt_to_page(xdp.data)); + consume_skb(skb); + xdp.rxq->mem = priv->xdp_mem; + if (xdp_do_redirect(priv->dev, &xdp, xdp_prog)) + goto err_xdp; + *xdp_xmit |= VETH_XDP_REDIR; + rcu_read_unlock(); + goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: @@ -477,9 +558,15 @@ drop: rcu_read_unlock(); kfree_skb(skb); return NULL; +err_xdp: + rcu_read_unlock(); + page_frag_free(xdp.data); +xdp_xmit: + return NULL; } -static int veth_xdp_rcv(struct veth_priv *priv, int budget) +static int veth_xdp_rcv(struct veth_priv *priv, int budget, + unsigned int *xdp_xmit) { int i, done = 0; @@ -490,10 +577,12 @@ static int veth_xdp_rcv(struct veth_priv *priv, int budget) if (!ptr) break; - if (veth_is_xdp_frame(ptr)) - skb = veth_xdp_rcv_one(priv, veth_ptr_to_xdp(ptr)); - else - skb = veth_xdp_rcv_skb(priv, ptr); + if (veth_is_xdp_frame(ptr)) { + skb = veth_xdp_rcv_one(priv, veth_ptr_to_xdp(ptr), + xdp_xmit); + } else { + skb = veth_xdp_rcv_skb(priv, ptr, xdp_xmit); + } if (skb) napi_gro_receive(&priv->xdp_napi, skb); @@ -508,9 +597,11 @@ static int veth_poll(struct napi_struct *napi, int budget) { struct veth_priv *priv = container_of(napi, struct veth_priv, xdp_napi); + unsigned int xdp_xmit = 0; int done; - done = veth_xdp_rcv(priv, 
budget);
+	xdp_set_return_frame_no_direct();
+	done = veth_xdp_rcv(priv, budget, &xdp_xmit);

 	if (done < budget && napi_complete_done(napi, done)) {
 		/* Write rx_notify_masked before reading ptr_ring */
@@ -521,6 +612,12 @@ static int veth_poll(struct napi_struct *napi, int budget)
 		}
 	}

+	if (xdp_xmit & VETH_XDP_TX)
+		veth_xdp_flush(priv->dev);
+	if (xdp_xmit & VETH_XDP_REDIR)
+		xdp_do_flush_map();
+	xdp_clear_return_frame_no_direct();
+
 	return done;
 }

@@ -567,6 +664,9 @@ static int veth_enable_xdp(struct net_device *dev)
 		err = veth_napi_add(dev);
 		if (err)
 			goto err;
+
+		/* Save original mem info as it can be overwritten */
+		priv->xdp_mem = priv->xdp_rxq.mem;
 	}

 	rcu_assign_pointer(priv->xdp_prog, priv->_xdp_prog);
@@ -584,6 +684,7 @@ static void veth_disable_xdp(struct net_device *dev)

 	rcu_assign_pointer(priv->xdp_prog, NULL);
 	veth_napi_del(dev);
+	priv->xdp_rxq.mem = priv->xdp_mem;
 	xdp_rxq_info_unreg(&priv->xdp_rxq);
 }
-- cgit v1.2.3

From 638264dc90227cca00d20c26680171addce18e51 Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Fri, 3 Aug 2018 16:58:18 +0900
Subject: veth: Support per queue XDP ring

Move XDP and napi related fields from veth_priv to the newly created
veth_rq structure.

When xdp_frames are enqueued from ndo_xdp_xmit and XDP_TX, the rxq is
selected by the current cpu.

When skbs are enqueued from the peer device, the rxq is a one to one
mapping of its peer txq. This imposes the restriction that the number of
rxqs must not be less than the number of peer txqs, but leaves the
possibility of bulk skb xmit in the future, because the txq lock would
make it possible to remove the rxq ptr_ring lock.

v3:
- Add extack messages.
- Fix array overrun in veth_xmit.

Signed-off-by: Toshiaki Makita
Signed-off-by: Daniel Borkmann
---
 drivers/net/veth.c | 278 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 188 insertions(+), 90 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 9b0a7b9ab1b8..e3202af72df5 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -42,20 +42,24 @@ struct pcpu_vstats {
 	struct u64_stats_sync syncp;
 };

-struct veth_priv {
+struct veth_rq {
 	struct napi_struct xdp_napi;
 	struct net_device *dev;
 	struct bpf_prog __rcu *xdp_prog;
-	struct bpf_prog *_xdp_prog;
-	struct net_device __rcu *peer;
-	atomic64_t dropped;
 	struct xdp_mem_info xdp_mem;
-	unsigned requested_headroom;
 	bool rx_notify_masked;
 	struct ptr_ring xdp_ring;
 	struct xdp_rxq_info xdp_rxq;
 };

+struct veth_priv {
+	struct net_device __rcu *peer;
+	atomic64_t dropped;
+	struct bpf_prog *_xdp_prog;
+	struct veth_rq *rq;
+	unsigned int requested_headroom;
+};
+
 /*
  * ethtool interface
  */
@@ -144,19 +148,19 @@ static void veth_ptr_free(void *ptr)
 		kfree_skb(ptr);
 }

-static void __veth_xdp_flush(struct veth_priv *priv)
+static void __veth_xdp_flush(struct veth_rq *rq)
 {
 	/* Write ptr_ring before reading rx_notify_masked */
 	smp_mb();
-	if (!priv->rx_notify_masked) {
-		priv->rx_notify_masked = true;
-		napi_schedule(&priv->xdp_napi);
+	if (!rq->rx_notify_masked) {
+		rq->rx_notify_masked = true;
+		napi_schedule(&rq->xdp_napi);
 	}
 }

-static int veth_xdp_rx(struct veth_priv *priv, struct sk_buff *skb)
+static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
 {
-	if (unlikely(ptr_ring_produce(&priv->xdp_ring, skb))) {
+	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
 		dev_kfree_skb_any(skb);
 		return NET_RX_DROP;
 	}
@@ -164,21 +168,22 @@ static int veth_xdp_rx(struct veth_priv *priv, struct sk_buff *skb)
 	return NET_RX_SUCCESS;
 }

-static int veth_forward_skb(struct
net_device *dev, struct sk_buff *skb, bool xdp) +static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb, + struct veth_rq *rq, bool xdp) { - struct veth_priv *priv = netdev_priv(dev); - return __dev_forward_skb(dev, skb) ?: xdp ? - veth_xdp_rx(priv, skb) : + veth_xdp_rx(rq, skb) : netif_rx(skb); } static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); + struct veth_rq *rq = NULL; struct net_device *rcv; int length = skb->len; bool rcv_xdp = false; + int rxq; rcu_read_lock(); rcv = rcu_dereference(priv->peer); @@ -188,9 +193,15 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) } rcv_priv = netdev_priv(rcv); - rcv_xdp = rcu_access_pointer(rcv_priv->xdp_prog); + rxq = skb_get_queue_mapping(skb); + if (rxq < rcv->real_num_rx_queues) { + rq = &rcv_priv->rq[rxq]; + rcv_xdp = rcu_access_pointer(rq->xdp_prog); + if (rcv_xdp) + skb_record_rx_queue(skb, rxq); + } - if (likely(veth_forward_skb(rcv, skb, rcv_xdp) == NET_RX_SUCCESS)) { + if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) { struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats); u64_stats_update_begin(&stats->syncp); @@ -203,7 +214,7 @@ drop: } if (rcv_xdp) - __veth_xdp_flush(rcv_priv); + __veth_xdp_flush(rq); rcu_read_unlock(); @@ -278,12 +289,18 @@ static struct sk_buff *veth_build_skb(void *head, int headroom, int len, return skb; } +static int veth_select_rxq(struct net_device *dev) +{ + return smp_processor_id() % dev->real_num_rx_queues; +} + static int veth_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct net_device *rcv; unsigned int max_len; + struct veth_rq *rq; int i, drops = 0; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) @@ -294,30 +311,31 @@ static int veth_xdp_xmit(struct net_device *dev, int n, return -ENXIO; rcv_priv = netdev_priv(rcv); + rq = &rcv_priv->rq[veth_select_rxq(rcv)]; /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive * side. This means an XDP program is loaded on the peer and the peer * device is up. */ - if (!rcu_access_pointer(rcv_priv->xdp_prog)) + if (!rcu_access_pointer(rq->xdp_prog)) return -ENXIO; max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN; - spin_lock(&rcv_priv->xdp_ring.producer_lock); + spin_lock(&rq->xdp_ring.producer_lock); for (i = 0; i < n; i++) { struct xdp_frame *frame = frames[i]; void *ptr = veth_xdp_to_ptr(frame); if (unlikely(frame->len > max_len || - __ptr_ring_produce(&rcv_priv->xdp_ring, ptr))) { + __ptr_ring_produce(&rq->xdp_ring, ptr))) { xdp_return_frame_rx_napi(frame); drops++; } } - spin_unlock(&rcv_priv->xdp_ring.producer_lock); + spin_unlock(&rq->xdp_ring.producer_lock); if (flags & XDP_XMIT_FLUSH) - __veth_xdp_flush(rcv_priv); + __veth_xdp_flush(rq); return n - drops; } @@ -326,6 +344,7 @@ static void veth_xdp_flush(struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct net_device *rcv; + struct veth_rq *rq; rcu_read_lock(); rcv = rcu_dereference(priv->peer); @@ -333,11 +352,12 @@ static void veth_xdp_flush(struct net_device *dev) goto out; rcv_priv = netdev_priv(rcv); + rq = &rcv_priv->rq[veth_select_rxq(rcv)]; /* xdp_ring is initialized on receive side? 
*/ - if (unlikely(!rcu_access_pointer(rcv_priv->xdp_prog))) + if (unlikely(!rcu_access_pointer(rq->xdp_prog))) goto out; - __veth_xdp_flush(rcv_priv); + __veth_xdp_flush(rq); out: rcu_read_unlock(); } @@ -352,7 +372,7 @@ static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) return veth_xdp_xmit(dev, 1, &frame, 0); } -static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, +static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, struct xdp_frame *frame, unsigned int *xdp_xmit) { @@ -365,7 +385,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, struct sk_buff *skb; rcu_read_lock(); - xdp_prog = rcu_dereference(priv->xdp_prog); + xdp_prog = rcu_dereference(rq->xdp_prog); if (likely(xdp_prog)) { struct xdp_buff xdp; u32 act; @@ -374,7 +394,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, xdp.data = frame->data; xdp.data_end = frame->data + frame->len; xdp.data_meta = frame->data - frame->metasize; - xdp.rxq = &priv->xdp_rxq; + xdp.rxq = &rq->xdp_rxq; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -387,8 +407,8 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, orig_frame = *frame; xdp.data_hard_start = head; xdp.rxq->mem = frame->mem; - if (unlikely(veth_xdp_tx(priv->dev, &xdp) < 0)) { - trace_xdp_exception(priv->dev, xdp_prog, act); + if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) { + trace_xdp_exception(rq->dev, xdp_prog, act); frame = &orig_frame; goto err_xdp; } @@ -399,7 +419,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, orig_frame = *frame; xdp.data_hard_start = head; xdp.rxq->mem = frame->mem; - if (xdp_do_redirect(priv->dev, &xdp, xdp_prog)) { + if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) { frame = &orig_frame; goto err_xdp; } @@ -409,7 +429,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: - trace_xdp_exception(priv->dev, xdp_prog, act); + trace_xdp_exception(rq->dev, xdp_prog, act); case XDP_DROP: goto err_xdp; } @@ -424,7 +444,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, } xdp_scrub_frame(frame); - skb->protocol = eth_type_trans(skb, priv->dev); + skb->protocol = eth_type_trans(skb, rq->dev); err: return skb; err_xdp: @@ -434,8 +454,7 @@ xdp_xmit: return NULL; } -static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, - struct sk_buff *skb, +static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, unsigned int *xdp_xmit) { u32 pktlen, headroom, act, metalen; @@ -445,7 +464,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, struct xdp_buff xdp; rcu_read_lock(); - xdp_prog = rcu_dereference(priv->xdp_prog); + xdp_prog = rcu_dereference(rq->xdp_prog); if (unlikely(!xdp_prog)) { rcu_read_unlock(); goto out; @@ -499,7 +518,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, xdp.data = skb_mac_header(skb); xdp.data_end = xdp.data + pktlen; xdp.data_meta = xdp.data; - xdp.rxq = &priv->xdp_rxq; + xdp.rxq = &rq->xdp_rxq; orig_data = xdp.data; orig_data_end = xdp.data_end; @@ -511,9 +530,9 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, case XDP_TX: get_page(virt_to_page(xdp.data)); consume_skb(skb); - xdp.rxq->mem = priv->xdp_mem; - if (unlikely(veth_xdp_tx(priv->dev, &xdp) < 0)) { - trace_xdp_exception(priv->dev, xdp_prog, act); + xdp.rxq->mem = rq->xdp_mem; + if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) { + trace_xdp_exception(rq->dev, xdp_prog, act); goto err_xdp; } *xdp_xmit |= VETH_XDP_TX; @@ -522,8 
+541,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, case XDP_REDIRECT: get_page(virt_to_page(xdp.data)); consume_skb(skb); - xdp.rxq->mem = priv->xdp_mem; - if (xdp_do_redirect(priv->dev, &xdp, xdp_prog)) + xdp.rxq->mem = rq->xdp_mem; + if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) goto err_xdp; *xdp_xmit |= VETH_XDP_REDIR; rcu_read_unlock(); @@ -531,7 +550,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: - trace_xdp_exception(priv->dev, xdp_prog, act); + trace_xdp_exception(rq->dev, xdp_prog, act); case XDP_DROP: goto drop; } @@ -547,7 +566,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, off = xdp.data_end - orig_data_end; if (off != 0) __skb_put(skb, off); - skb->protocol = eth_type_trans(skb, priv->dev); + skb->protocol = eth_type_trans(skb, rq->dev); metalen = xdp.data - xdp.data_meta; if (metalen) @@ -565,27 +584,26 @@ xdp_xmit: return NULL; } -static int veth_xdp_rcv(struct veth_priv *priv, int budget, - unsigned int *xdp_xmit) +static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit) { int i, done = 0; for (i = 0; i < budget; i++) { - void *ptr = __ptr_ring_consume(&priv->xdp_ring); + void *ptr = __ptr_ring_consume(&rq->xdp_ring); struct sk_buff *skb; if (!ptr) break; if (veth_is_xdp_frame(ptr)) { - skb = veth_xdp_rcv_one(priv, veth_ptr_to_xdp(ptr), + skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr), xdp_xmit); } else { - skb = veth_xdp_rcv_skb(priv, ptr, xdp_xmit); + skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit); } if (skb) - napi_gro_receive(&priv->xdp_napi, skb); + napi_gro_receive(&rq->xdp_napi, skb); done++; } @@ -595,25 +613,25 @@ static int veth_xdp_rcv(struct veth_priv *priv, int budget, static int veth_poll(struct napi_struct *napi, int budget) { - struct veth_priv *priv = - container_of(napi, struct veth_priv, xdp_napi); + struct veth_rq *rq = + container_of(napi, struct veth_rq, xdp_napi); unsigned int xdp_xmit = 0; int done; xdp_set_return_frame_no_direct(); - done = veth_xdp_rcv(priv, budget, &xdp_xmit); + done = veth_xdp_rcv(rq, budget, &xdp_xmit); if (done < budget && napi_complete_done(napi, done)) { /* Write rx_notify_masked before reading ptr_ring */ - smp_store_mb(priv->rx_notify_masked, false); - if (unlikely(!__ptr_ring_empty(&priv->xdp_ring))) { - priv->rx_notify_masked = true; - napi_schedule(&priv->xdp_napi); + smp_store_mb(rq->rx_notify_masked, false); + if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) { + rq->rx_notify_masked = true; + napi_schedule(&rq->xdp_napi); } } if (xdp_xmit & VETH_XDP_TX) - veth_xdp_flush(priv->dev); + veth_xdp_flush(rq->dev); if (xdp_xmit & VETH_XDP_REDIR) xdp_do_flush_map(); xdp_clear_return_frame_no_direct(); @@ -624,56 +642,90 @@ static int veth_poll(struct napi_struct *napi, int budget) static int veth_napi_add(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); - int err; + int err, i; - err = ptr_ring_init(&priv->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); - if (err) - return err; + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; + + err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); + if (err) + goto err_xdp_ring; + } - netif_napi_add(dev, &priv->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); - napi_enable(&priv->xdp_napi); + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; + + netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); + napi_enable(&rq->xdp_napi); + } return 0; 
+err_xdp_ring: + for (i--; i >= 0; i--) + ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free); + + return err; } static void veth_napi_del(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); + int i; - napi_disable(&priv->xdp_napi); - netif_napi_del(&priv->xdp_napi); - priv->rx_notify_masked = false; - ptr_ring_cleanup(&priv->xdp_ring, veth_ptr_free); + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; + + napi_disable(&rq->xdp_napi); + napi_hash_del(&rq->xdp_napi); + } + synchronize_net(); + + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; + + netif_napi_del(&rq->xdp_napi); + rq->rx_notify_masked = false; + ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free); + } } static int veth_enable_xdp(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); - int err; + int err, i; - if (!xdp_rxq_info_is_reg(&priv->xdp_rxq)) { - err = xdp_rxq_info_reg(&priv->xdp_rxq, dev, 0); - if (err < 0) - return err; + if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) { + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; - err = xdp_rxq_info_reg_mem_model(&priv->xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL); - if (err < 0) - goto err; + err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i); + if (err < 0) + goto err_rxq_reg; + + err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL); + if (err < 0) + goto err_reg_mem; + + /* Save original mem info as it can be overwritten */ + rq->xdp_mem = rq->xdp_rxq.mem; + } err = veth_napi_add(dev); if (err) - goto err; - - /* Save original mem info as it can be overwritten */ - priv->xdp_mem = priv->xdp_rxq.mem; + goto err_rxq_reg; } - rcu_assign_pointer(priv->xdp_prog, priv->_xdp_prog); + for (i = 0; i < dev->real_num_rx_queues; i++) + rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog); return 0; -err: - xdp_rxq_info_unreg(&priv->xdp_rxq); +err_reg_mem: + xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); +err_rxq_reg: + for (i--; i >= 0; i--) + xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); return err; } @@ -681,11 +733,17 @@ err: static void veth_disable_xdp(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); + int i; - rcu_assign_pointer(priv->xdp_prog, NULL); + for (i = 0; i < dev->real_num_rx_queues; i++) + rcu_assign_pointer(priv->rq[i].xdp_prog, NULL); veth_napi_del(dev); - priv->xdp_rxq.mem = priv->xdp_mem; - xdp_rxq_info_unreg(&priv->xdp_rxq); + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; + + rq->xdp_rxq.mem = rq->xdp_mem; + xdp_rxq_info_unreg(&rq->xdp_rxq); + } } static int veth_open(struct net_device *dev) @@ -842,6 +900,12 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, goto err; } + if (dev->real_num_rx_queues < peer->real_num_tx_queues) { + NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues"); + err = -ENOSPC; + goto err; + } + if (dev->flags & IFF_UP) { err = veth_enable_xdp(dev); if (err) { @@ -976,13 +1040,31 @@ static int veth_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } +static int veth_alloc_queues(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + + priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL); + if (!priv->rq) + return -ENOMEM; + + return 0; +} + +static void veth_free_queues(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + + kfree(priv->rq); +} + static struct rtnl_link_ops veth_link_ops; static int 
veth_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - int err; + int err, i; struct net_device *peer; struct veth_priv *priv; char ifname[IFNAMSIZ]; @@ -1035,6 +1117,12 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, return PTR_ERR(peer); } + err = veth_alloc_queues(peer); + if (err) { + put_net(net); + goto err_peer_alloc_queues; + } + if (!ifmp || !tbp[IFLA_ADDRESS]) eth_hw_addr_random(peer); @@ -1063,6 +1151,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, * should be re-allocated */ + err = veth_alloc_queues(dev); + if (err) + goto err_alloc_queues; + if (tb[IFLA_ADDRESS] == NULL) eth_hw_addr_random(dev); @@ -1082,22 +1174,28 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, */ priv = netdev_priv(dev); - priv->dev = dev; + for (i = 0; i < dev->real_num_rx_queues; i++) + priv->rq[i].dev = dev; rcu_assign_pointer(priv->peer, peer); priv = netdev_priv(peer); - priv->dev = peer; + for (i = 0; i < peer->real_num_rx_queues; i++) + priv->rq[i].dev = peer; rcu_assign_pointer(priv->peer, dev); return 0; err_register_dev: + veth_free_queues(dev); +err_alloc_queues: /* nothing to do */ err_configure_peer: unregister_netdevice(peer); return err; err_register_peer: + veth_free_queues(peer); +err_peer_alloc_queues: free_netdev(peer); return err; } -- cgit v1.2.3
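
With the series applied, driver XDP on a veth pair can be exercised end to
end. As a minimal sketch (the file and interface names below are
illustrative, not from the patches), the following XDP program simply passes
every packet up the stack, which is enough to activate the NAPI receive path
added by this series:

/* xdp_pass_kern.c -- illustrative example, not part of the series */
#include <linux/bpf.h>

#define __section(NAME) __attribute__((section(NAME), used))

__section("xdp")
int xdp_pass(struct xdp_md *ctx)
{
	return XDP_PASS;	/* hand every packet to the normal stack */
}

char _license[] __section("license") = "GPL";

/* Possible usage, assuming clang and an iproute2 built with XDP support:
 *
 *   clang -O2 -target bpf -c xdp_pass_kern.c -o xdp_pass_kern.o
 *   ip link add veth0 type veth peer name veth1
 *   ip link set dev veth0 xdp obj xdp_pass_kern.o sec xdp
 *
 * Per the patches above, attaching fails with an extack message if the
 * peer's MTU exceeds what fits in a page minus XDP headroom and
 * skb_shared_info, or if the device has fewer rx queues than the peer
 * has tx queues.
 */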