diff options
author | Pablo Neira Ayuso <pablo@netfilter.org> | 2021-03-28 23:08:55 +0200 |
---|---|---|
committer | Pablo Neira Ayuso <pablo@netfilter.org> | 2021-03-31 22:34:11 +0200 |
commit | 8b9229d15877ec77775633f058d14145f6eb98fa (patch) | |
tree | fce382c7beb58215e6e6ad99bc8b36a8999fda87 /net/netfilter | |
parent | 5c701e71961af0ec8227ea615f1646dbe98aea1a (diff) |
netfilter: flowtable: dst_check() from garbage collector path
Move dst_check() to the garbage collector path. Stale routes trigger the
flow entry teardown state which makes affected flows go back to the
classic forwarding path to re-evaluate flow offloading.
IPv6 requires the dst cookie for dst_check() to work: store it in the flow_tuple,
otherwise dst_check() always fails.
Fixes: e5075c0badaa ("netfilter: flowtable: call dst_check() to fall back to classic forwarding")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Diffstat (limited to 'net/netfilter')
-rw-r--r-- | net/netfilter/nf_flow_table_core.c | 37 | ||||
-rw-r--r-- | net/netfilter/nf_flow_table_ip.c | 22 |
2 files changed, 40 insertions, 19 deletions
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 1bce1d2805c4..76573bae6664 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -74,6 +74,18 @@ err_ct_refcnt: } EXPORT_SYMBOL_GPL(flow_offload_alloc); +static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple) +{ + const struct rt6_info *rt; + + if (flow_tuple->l3proto == NFPROTO_IPV6) { + rt = (const struct rt6_info *)flow_tuple->dst_cache; + return rt6_get_cookie(rt); + } + + return 0; +} + static int flow_offload_fill_route(struct flow_offload *flow, const struct nf_flow_route *route, enum flow_offload_tuple_dir dir) @@ -116,6 +128,7 @@ static int flow_offload_fill_route(struct flow_offload *flow, return -1; flow_tuple->dst_cache = dst; + flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple); break; } flow_tuple->xmit_type = route->tuple[dir].xmit_type; @@ -390,11 +403,33 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table, return err; } +static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple) +{ + struct dst_entry *dst; + + if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH || + tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) { + dst = tuple->dst_cache; + if (!dst_check(dst, tuple->dst_cookie)) + return true; + } + + return false; +} + +static bool nf_flow_has_stale_dst(struct flow_offload *flow) +{ + return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) || + flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple); +} + static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data) { struct nf_flowtable *flow_table = data; - if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct)) + if (nf_flow_has_expired(flow) || + nf_ct_is_dying(flow->ct) || + nf_flow_has_stale_dst(flow)) set_bit(NF_FLOW_TEARDOWN, &flow->flags); if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) { diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c 
index 12cb0cc6958c..889cf88d3dba 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -364,15 +364,6 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, if (nf_flow_state_check(flow, iph->protocol, skb, thoff)) return NF_ACCEPT; - if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH || - tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) { - rt = (struct rtable *)tuplehash->tuple.dst_cache; - if (!dst_check(&rt->dst, 0)) { - flow_offload_teardown(flow); - return NF_ACCEPT; - } - } - if (skb_try_make_writable(skb, thoff + hdrsize)) return NF_DROP; @@ -391,6 +382,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) { + rt = (struct rtable *)tuplehash->tuple.dst_cache; memset(skb->cb, 0, sizeof(struct inet_skb_parm)); IPCB(skb)->iif = skb->dev->ifindex; IPCB(skb)->flags = IPSKB_FORWARDED; @@ -399,6 +391,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, switch (tuplehash->tuple.xmit_type) { case FLOW_OFFLOAD_XMIT_NEIGH: + rt = (struct rtable *)tuplehash->tuple.dst_cache; outdev = rt->dst.dev; skb->dev = outdev; nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr); @@ -607,15 +600,6 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff)) return NF_ACCEPT; - if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH || - tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) { - rt = (struct rt6_info *)tuplehash->tuple.dst_cache; - if (!dst_check(&rt->dst, 0)) { - flow_offload_teardown(flow); - return NF_ACCEPT; - } - } - if (skb_try_make_writable(skb, thoff + hdrsize)) return NF_DROP; @@ -633,6 +617,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) { + rt = 
(struct rt6_info *)tuplehash->tuple.dst_cache; memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); IP6CB(skb)->iif = skb->dev->ifindex; IP6CB(skb)->flags = IP6SKB_FORWARDED; @@ -641,6 +626,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, switch (tuplehash->tuple.xmit_type) { case FLOW_OFFLOAD_XMIT_NEIGH: + rt = (struct rt6_info *)tuplehash->tuple.dst_cache; outdev = rt->dst.dev; skb->dev = outdev; nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6); |