author     Jesper Dangaard Brouer <brouer@redhat.com>  2018-09-03 09:55:07 +0200
committer  Alexei Starovoitov <ast@kernel.org>         2018-09-06 22:34:08 -0700
commit     47b123ed9e99b064dd2b250e1b62e1d91dd876ee (patch)
tree       27b71b96b126afcd8985e03153554ccc10305c35 /net
parent     2a68d85fe1b7511731dedf73c4003fec2880f1ce (diff)
xdp: split code for map vs non-map redirect
The compiler does an efficient job of inlining static C functions.
perf top clearly shows that almost everything gets inlined into the
single function xdp_do_redirect.
The function xdp_do_redirect thus ends up containing and interleaving
the map and non-map redirect code. This is sub-optimal, as it would be
strange for an XDP program to use both types of redirect in the same
program. The two use-cases are separate, and interleaving the code
just causes more instruction-cache pressure.
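For context, the two redirect flavors look roughly like this from the
BPF program side (a minimal sketch in the style of samples/bpf; the map
definition, section names, map key, and ifindex value are illustrative
and not part of this commit):

  #include <linux/bpf.h>
  #include "bpf_helpers.h"

  /* Devmap holding egress devices; size and contents are illustrative. */
  struct bpf_map_def SEC("maps") tx_port = {
  	.type = BPF_MAP_TYPE_DEVMAP,
  	.key_size = sizeof(int),
  	.value_size = sizeof(int),
  	.max_entries = 64,
  };

  /* Fast path: the target is resolved through the devmap, so the kernel
   * can batch the actual transmit (lands in xdp_do_redirect_map). */
  SEC("xdp_redirect_map")
  int xdp_prog_redirect_map(struct xdp_md *ctx)
  {
  	return bpf_redirect_map(&tx_port, 0 /* map key */, 0);
  }

  /* Slow path: redirect by raw ifindex (lands in xdp_do_redirect_slow
   * after this patch). */
  SEC("xdp_redirect")
  int xdp_prog_redirect(struct xdp_md *ctx)
  {
  	return bpf_redirect(4 /* illustrative ifindex */, 0);
  }

  char _license[] SEC("license") = "GPL";

A given program attaches one of these paths at a time, which is why
interleaving both in xdp_do_redirect only adds instruction-cache
footprint without ever helping a single program.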
I would like to stress (again) that the non-map variant bpf_redirect
is very slow compared to the bpf_redirect_map variant, roughly half
the speed. Measured with the i40e driver, the difference is:
- map redirect: 13,250,350 pps
- non-map redirect: 7,491,425 pps
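(For scale: 7,491,425 / 13,250,350 ≈ 0.57, i.e. the non-map path
delivers only about 57% of the map path's throughput.)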
For this reason, the non-map variant has been named
xdp_do_redirect_slow. This hopefully gives a hint, when using perf,
that this is not the optimal XDP redirect operating mode.
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--  net/core/filter.c  52
1 file changed, 30 insertions(+), 22 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index d7dbe412cb9e..8cb242b4400f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3175,6 +3175,32 @@ static int __bpf_tx_xdp(struct net_device *dev,
 	return 0;
 }
 
+static noinline int
+xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
+		     struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
+{
+	struct net_device *fwd;
+	u32 index = ri->ifindex;
+	int err;
+
+	fwd = dev_get_by_index_rcu(dev_net(dev), index);
+	ri->ifindex = 0;
+	if (unlikely(!fwd)) {
+		err = -EINVAL;
+		goto err;
+	}
+
+	err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
+	if (unlikely(err))
+		goto err;
+
+	_trace_xdp_redirect(dev, xdp_prog, index);
+	return 0;
+err:
+	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
+	return err;
+}
+
 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 			    struct bpf_map *map,
 			    struct xdp_buff *xdp,
@@ -3269,9 +3295,9 @@ void bpf_clear_redirect_map(struct bpf_map *map)
 }
 
 static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
-			       struct bpf_prog *xdp_prog, struct bpf_map *map)
+			       struct bpf_prog *xdp_prog, struct bpf_map *map,
+			       struct bpf_redirect_info *ri)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
 	u32 index = ri->ifindex;
 	void *fwd = NULL;
 	int err;
@@ -3304,29 +3330,11 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
 	struct bpf_map *map = READ_ONCE(ri->map);
-	struct net_device *fwd;
-	u32 index = ri->ifindex;
-	int err;
 
 	if (likely(map))
-		return xdp_do_redirect_map(dev, xdp, xdp_prog, map);
+		return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
 
-	fwd = dev_get_by_index_rcu(dev_net(dev), index);
-	ri->ifindex = 0;
-	if (unlikely(!fwd)) {
-		err = -EINVAL;
-		goto err;
-	}
-
-	err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
-	if (unlikely(err))
-		goto err;
-
-	_trace_xdp_redirect(dev, xdp_prog, index);
-	return 0;
-err:
-	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
-	return err;
+	return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);