author | Vlad Buslov <vladbu@mellanox.com> | 2019-08-03 21:43:06 +0300 |
---|---|---|
committer | Saeed Mahameed <saeedm@mellanox.com> | 2019-08-21 15:55:17 -0700 |
commit | 6a06c2f7843d85b43ccea6e89de8e432834c089b (patch) | |
tree | 8e1525686cffc7f57e215acf9b4542277228c862 | |
parent | ac0d917632cf7fbbe953f2ec82c2c979ab1b4a06 (diff) | |
net/mlx5e: Refactor neigh used value update for concurrent execution
In order to remove the dependency on rtnl lock and to allow the neigh
used value update workqueue task to execute concurrently with tc,
refactor mlx5e_tc_update_neigh_used_value() for concurrent execution:
- Lock the encap table when accessing an encap entry to prevent
  concurrent changes.
- Save offloaded encap flows to a temporary list and release them after
  the encap entry is updated. Add the mlx5e_put_encap_flow_list()
  helper, which is intended to be shared with the neigh update code in a
  following patch in this series. This is necessary because
  mlx5e_flow_put() can't be called while holding encap_tbl_lock (a
  stand-alone sketch of this pattern is included below).
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
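The temporary-list approach in the second bullet is the usual "take references and collect under the lock, release them after unlocking" pattern. Below is a minimal, stand-alone user-space sketch of that pattern, assuming hypothetical names (entry, entry_put(), table_lock, scan_table()) in place of the driver's mlx5e_tc_flow, mlx5e_flow_put(), encap_tbl_lock and mlx5e_tc_update_neigh_used_value(); it is illustrative only, not the driver code.

```c
/* Sketch only: collect referenced entries under the lock and release
 * them after the lock is dropped, because the release path needs the
 * lock itself (the reason mlx5e_flow_put() can't run under
 * encap_tbl_lock in the patch below).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int id;
	int refcnt;          /* protected by table_lock in this sketch */
	struct entry *next;  /* link in the shared table */
	struct entry *tmp;   /* link in the caller's temporary list */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;  /* shared list, protected by table_lock */

static void entry_put(struct entry *e)
{
	int last;

	/* The final put needs table_lock for its own bookkeeping, so the
	 * caller must not already hold that lock when calling this.
	 */
	pthread_mutex_lock(&table_lock);
	last = (--e->refcnt == 0);
	pthread_mutex_unlock(&table_lock);
	if (last)
		free(e);
}

static void scan_table(void)
{
	struct entry *e, *next, *tmp_list = NULL;

	pthread_mutex_lock(&table_lock);
	for (e = table; e; e = e->next) {
		e->refcnt++;        /* keep the entry alive past the unlock */
		e->tmp = tmp_list;  /* stash it on the temporary list */
		tmp_list = e;
		printf("scanned entry %d\n", e->id);
	}
	pthread_mutex_unlock(&table_lock);

	/* Drop the references only after the lock has been released. */
	for (e = tmp_list; e; e = next) {
		next = e->tmp;
		entry_put(e);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct entry *e = calloc(1, sizeof(*e));

		e->id = i;
		e->refcnt = 1;  /* reference held by the table itself */
		e->next = table;
		table = e;
	}
	scan_table();
	return 0;
}
```

The patch below follows the same shape: flows are added to a local flow_list with list_add(&flow->tmp_list, &flow_list) while encap_tbl_lock is held, and mlx5e_put_encap_flow_list() drops the references only after mutex_unlock().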
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 23 |
1 file changed, 19 insertions, 4 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index a4d11274be30..3a562189af71 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -126,6 +126,7 @@ struct mlx5e_tc_flow {
         struct list_head hairpin; /* flows sharing the same hairpin */
         struct list_head peer; /* flows with peer flow */
         struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
+        struct list_head tmp_list; /* temporary flow list used by neigh update */
         refcount_t refcnt;
         struct rcu_head rcu_head;
         union {
@@ -1412,6 +1413,15 @@ static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
         return flow->nic_attr->counter;
 }
 
+/* Iterate over tmp_list of flows attached to flow_list head. */
+static void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
+{
+        struct mlx5e_tc_flow *flow, *tmp;
+
+        list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
+                mlx5e_flow_put(priv, flow);
+}
+
 static struct mlx5e_encap_entry *
 mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
                            struct mlx5e_encap_entry *e)
@@ -1481,30 +1491,35 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
          * next one.
          */
         while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
+                struct mlx5e_priv *priv = netdev_priv(e->out_dev);
                 struct encap_flow_item *efi, *tmp;
+                struct mlx5_eswitch *esw;
+                LIST_HEAD(flow_list);
 
+                esw = priv->mdev->priv.eswitch;
+                mutex_lock(&esw->offloads.encap_tbl_lock);
                 list_for_each_entry_safe(efi, tmp, &e->flows, list) {
                         flow = container_of(efi, struct mlx5e_tc_flow,
                                             encaps[efi->index]);
                         if (IS_ERR(mlx5e_flow_get(flow)))
                                 continue;
+                        list_add(&flow->tmp_list, &flow_list);
 
                         if (mlx5e_is_offloaded_flow(flow)) {
                                 counter = mlx5e_tc_get_counter(flow);
                                 lastuse = mlx5_fc_query_lastuse(counter);
                                 if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
-                                        mlx5e_flow_put(netdev_priv(e->out_dev), flow);
                                         neigh_used = true;
                                         break;
                                 }
                         }
-
-                        mlx5e_flow_put(netdev_priv(e->out_dev), flow);
                 }
+                mutex_unlock(&esw->offloads.encap_tbl_lock);
 
+                mlx5e_put_encap_flow_list(priv, &flow_list);
                 if (neigh_used) {
                         /* release current encap before breaking the loop */
-                        mlx5e_encap_put(netdev_priv(e->out_dev), e);
+                        mlx5e_encap_put(priv, e);
                         break;
                 }
         }