author | Vlad Buslov <vladbu@mellanox.com> | 2019-08-26 16:44:59 +0300
committer | David S. Miller <davem@davemloft.net> | 2019-08-26 14:17:43 -0700
commit | 401192113730947572d280ec465555ab9ff5a597 (patch)
tree | b08b227205e6302e2a8a31992b381f5e28173261 /include
parent | 97394bef5622cb32fd1e5d152251090da6c238b9 (diff)
net: sched: refactor block offloads counter usage
Without rtnl lock protection, filters can no longer safely manage the block
offloads counter themselves. Refactor the cls API to protect block offloadcnt
with tcf_block->cb_lock, which is already used to protect the driver callback
list and the nooffloaddevcnt counter. The counter can still be modified by
concurrent tasks through the new functions that execute block callbacks
(which is safe because the previous patch changed its type to atomic_t);
however, the block bind/unbind code that checks the counter value takes
cb_lock in write mode to exclude any concurrent modifications. This approach
prevents races between bind/unbind and callback execution while still
allowing concurrency on the tc rule update path.
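
The locking split can be pictured with a short sketch (a simplified illustration, not the actual cls_api.c code; the example_* functions are invented for this sketch, while cb_lock, offloadcnt, nooffloaddevcnt and TCA_CLS_FLAGS_IN_HW are the real fields/flags referenced above):

```c
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/* Simplified sketch of the locking scheme described above; not the
 * actual cls_api.c code.
 */
static int example_block_bind(struct tcf_block *block)
{
	int err = 0;

	down_write(&block->cb_lock);	/* excludes concurrent callback runs */
	if (atomic_read(&block->offloadcnt)) {
		/* Filters are already offloaded through this block; refuse
		 * to change the callback list underneath them.
		 */
		err = -EBUSY;
		goto out_unlock;
	}
	/* ... add the driver callback to the block's callback list ... */
out_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void example_rule_update(struct tcf_block *block, u32 *flags,
				unsigned int *in_hw_count)
{
	down_read(&block->cb_lock);	/* rule updates may run concurrently */
	/* ... execute the block callbacks here ... */
	if (!(*flags & TCA_CLS_FLAGS_IN_HW)) {
		*flags |= TCA_CLS_FLAGS_IN_HW;
		atomic_inc(&block->offloadcnt);	/* atomic_t since previous patch */
	}
	(*in_hw_count)++;
	up_read(&block->cb_lock);
}
```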
Move block offload counter, filter in-hardware counter and filter flags
management from the classifiers into the cls hardware offloads API. Make
tcf_block_offload_{inc|dec}() and tc_cls_offload_cnt_update() private to the
cls API. Implement the following new cls API functions to be used instead:
tc_setup_cb_add() - non-destructive filter add. If a filter that wasn't
already in hardware is successfully offloaded, increment the block offloads
counter and set the filter's in-hardware counter and flag. On failure, a
previously offloaded filter is considered intact and the offloads counter is
not decremented.

tc_setup_cb_replace() - destructive filter replace. Release the existing
filter's block offload counter and reset its in-hardware counter and flag,
then set the new filter's in-hardware counter and flag. On failure, the
previously offloaded filter is considered destroyed and the offload counter
is decremented.

tc_setup_cb_destroy() - filter destroy. Unconditionally decrement the block
offloads counter.

tc_setup_cb_reoffload() - reoffload a filter to a single cb. Execute cb() and
call tc_cls_offload_cnt_update() if cb() didn't return an error.
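
Internally, these helpers take over the bookkeeping that classifiers used to do through tc_cls_offload_cnt_update(). Below is a hedged sketch of the add/destroy bookkeeping only (illustrative, not the verbatim cls_api.c implementation, which additionally handles err_stop, rtnl_held, tp->lock and the callback loop; cb_ok_count here stands for the number of callbacks that accepted the filter):

```c
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/* Illustrative sketch of the counter/flag bookkeeping now centralized
 * in the cls API helpers.
 */
static void example_offload_add_bookkeeping(struct tcf_block *block,
					    u32 *flags,
					    unsigned int *in_hw_count,
					    unsigned int cb_ok_count)
{
	if (!cb_ok_count)
		return;		/* nothing was offloaded, counters untouched */

	if (!(*flags & TCA_CLS_FLAGS_IN_HW)) {
		*flags |= TCA_CLS_FLAGS_IN_HW;
		atomic_inc(&block->offloadcnt);
	}
	*in_hw_count += cb_ok_count;
}

static void example_offload_destroy_bookkeeping(struct tcf_block *block,
						u32 *flags,
						unsigned int *in_hw_count)
{
	/* The filter is going away regardless of callback results, so its
	 * contribution to the block counter is always released.
	 */
	if (*flags & TCA_CLS_FLAGS_IN_HW) {
		*flags &= ~TCA_CLS_FLAGS_IN_HW;
		atomic_dec(&block->offloadcnt);
	}
	*in_hw_count = 0;
}
```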
Refactor all offload-capable classifiers to atomically offload filters to
hardware, update the block offload counter, and set the filter's in-hardware
counter and flag by means of the new cls API functions.
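
For illustration, a classifier's hardware-add path might now look roughly like this (a sketch loosely modelled on cls_flower; struct example_filter and example_hw_replace_filter() are hypothetical, only tc_setup_cb_add(), TC_SETUP_CLSFLOWER and struct flow_cls_offload are real):

```c
#include <net/pkt_cls.h>
#include <net/flow_offload.h>

/* Hypothetical classifier-private filter state, for illustration only. */
struct example_filter {
	u32 flags;			/* TCA_CLS_FLAGS_* */
	unsigned int in_hw_count;	/* callbacks that offloaded this filter */
};

static int example_hw_replace_filter(struct tcf_proto *tp,
				     struct tcf_block *block,
				     struct example_filter *f,
				     bool skip_sw, bool rtnl_held)
{
	struct flow_cls_offload cls_flower = {};
	int err;

	cls_flower.command = FLOW_CLS_REPLACE;
	/* ... fill in cls_flower.common, cookie and rule for this filter ... */

	/* tc_setup_cb_add() runs the block callbacks and, on success,
	 * updates block->offloadcnt, f->in_hw_count and the
	 * TCA_CLS_FLAGS_IN_HW bit in f->flags on behalf of the classifier.
	 */
	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	if (err)
		return err;

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;	/* skip_sw filter not taken by any device */

	return 0;
}
```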
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r-- | include/net/pkt_cls.h | 17
-rw-r--r-- | include/net/sch_generic.h | 31
2 files changed, 16 insertions, 32 deletions
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 64999ffcb486..612232492f67 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -506,7 +506,22 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
 int tc_setup_flow_action(struct flow_action *flow_action,
                          const struct tcf_exts *exts);
 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
-                     void *type_data, bool err_stop);
+                     void *type_data, bool err_stop, bool rtnl_held);
+int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
+                    enum tc_setup_type type, void *type_data, bool err_stop,
+                    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
+int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
+                        enum tc_setup_type type, void *type_data, bool err_stop,
+                        u32 *old_flags, unsigned int *old_in_hw_count,
+                        u32 *new_flags, unsigned int *new_in_hw_count,
+                        bool rtnl_held);
+int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
+                        enum tc_setup_type type, void *type_data, bool err_stop,
+                        u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
+int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
+                          bool add, flow_setup_cb_t *cb,
+                          enum tc_setup_type type, void *type_data,
+                          void *cb_priv, u32 *flags, unsigned int *in_hw_count);
 unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
 
 struct tc_cls_u32_knode {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d778c502decd..f90e3b2a3065 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -439,37 +439,6 @@ static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
 #define tcf_proto_dereference(p, tp)	\
         rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
 
-static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
-{
-        if (*flags & TCA_CLS_FLAGS_IN_HW)
-                return;
-        *flags |= TCA_CLS_FLAGS_IN_HW;
-        atomic_inc(&block->offloadcnt);
-}
-
-static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
-{
-        if (!(*flags & TCA_CLS_FLAGS_IN_HW))
-                return;
-        *flags &= ~TCA_CLS_FLAGS_IN_HW;
-        atomic_dec(&block->offloadcnt);
-}
-
-static inline void
-tc_cls_offload_cnt_update(struct tcf_block *block, u32 *cnt,
-                          u32 *flags, bool add)
-{
-        if (add) {
-                if (!*cnt)
-                        tcf_block_offload_inc(block, flags);
-                (*cnt)++;
-        } else {
-                (*cnt)--;
-                if (!*cnt)
-                        tcf_block_offload_dec(block, flags);
-        }
-}
-
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
         struct qdisc_skb_cb *qcb;
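
As a usage note for the last prototype above, a classifier's ->reoffload() implementation would call tc_setup_cb_reoffload() once per offloaded filter, roughly as sketched below (hedged illustration; struct example_filter and example_reoffload_one() are hypothetical, only the tc_setup_cb_reoffload() prototype comes from this patch):

```c
#include <net/pkt_cls.h>
#include <net/flow_offload.h>

/* Hypothetical classifier-private filter state, for illustration only. */
struct example_filter {
	u32 flags;			/* TCA_CLS_FLAGS_* */
	unsigned int in_hw_count;	/* callbacks that offloaded this filter */
};

static int example_reoffload_one(struct tcf_proto *tp,
				 struct example_filter *f, bool add,
				 flow_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	cls_flower.command = add ? FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
	/* ... fill in cls_flower.common, cookie and rule for this filter ... */

	/* Executes cb() for this single callback and, if it succeeds, updates
	 * f->in_hw_count, f->flags and block->offloadcnt inside the cls API
	 * instead of in the classifier.
	 */
	return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSFLOWER,
				     &cls_flower, cb_priv, &f->flags,
				     &f->in_hw_count);
}
```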