commit    1a4f1a024c4f53715d14ddd7273bba9dc6dde578
tree      2072b6810fac872000cf2d8319dca46a07b42d73 /drivers/net
parent    42aa15cf05c0a47cc5807c21c7ff471b80cad371
parent    25948b87dda284664edeb3b3dab689df0a7dc889
author    David S. Miller <davem@davemloft.net>  2019-08-29 17:25:18 -0700
committer David S. Miller <davem@davemloft.net>  2019-08-29 17:25:18 -0700
Merge tag 'mlx5-updates-2019-08-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:
====================
mlx5-updates-2019-08-22
Misc updates for mlx5e net device driver
1) Maxim and Tariq add support for LAG TX port affinity distribution.
When VF LAG is enabled, VF netdevs round-robin the TX affinity of their
TX queues across the LAG ports (a standalone sketch of this mapping
appears below, after the sign-off).
2) Aya adds support for IP-in-IP RSS.
3) Marina adds support for IP-in-IP TX TSO and checksum offloads (a sketch
of the shared capability check appears after the diffstat).
4) Moshe adds a device-internal drop counter to the mlx5 ethtool stats.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
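Editor's note: the following is a minimal, standalone C sketch (not driver code) of the round-robin described in item 1. It mirrors the mlx5e_enumerate_lag_port() helper this series adds in en_main.c; the two-port count, the vhca_id bias value, and the channel count are illustrative assumptions only.

/* Sketch of the LAG TX affinity round-robin: channel index plus a per-VF
 * bias (the PF uses 0, a VF uses its vhca_id) selects the LAG port.
 */
#include <stdio.h>

#define NUM_LAG_PORTS 2		/* illustrative; the driver clamps num_lag_ports */

static unsigned int enumerate_lag_port(unsigned int channel_ix,
				       unsigned int port_aff_bias)
{
	return (channel_ix + port_aff_bias) % NUM_LAG_PORTS;
}

int main(void)
{
	unsigned int vhca_id = 5;	/* hypothetical VF vhca_id */
	unsigned int ix;

	for (ix = 0; ix < 8; ix++)
		printf("channel %u -> lag port %u\n",
		       ix, enumerate_lag_port(ix, vhca_id));
	return 0;
}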
Diffstat (limited to 'drivers/net')
20 files changed, 280 insertions(+), 112 deletions(-)
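Editor's note: for items 2 and 3, all IP-in-IP offloads key off a single device capability. Below is a hedged, userspace-style sketch of the per-protocol check performed by the new mlx5e_tunnel_proto_supported() helper in en_fs.c; the tunnel_caps struct here is only a stand-in for the MLX5_CAP_ETH() capability bits, not a real driver structure.

#include <stdbool.h>
#include <netinet/in.h>		/* IPPROTO_GRE, IPPROTO_IPIP, IPPROTO_IPV6 */

/* Stand-in for the MLX5_CAP_ETH() capability bits queried by the driver. */
struct tunnel_caps {
	bool tunnel_stateless_gre;
	bool tunnel_stateless_ip_over_ip;
};

static bool tunnel_proto_supported(const struct tunnel_caps *caps, int proto)
{
	switch (proto) {
	case IPPROTO_GRE:
		return caps->tunnel_stateless_gre;
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:	/* IPv6-in-IP shares the ip-over-ip capability */
		return caps->tunnel_stateless_ip_over_ip;
	default:
		return false;
	}
}

int main(void)
{
	struct tunnel_caps caps = { .tunnel_stateless_gre = true,
				    .tunnel_stateless_ip_over_ip = true };

	return tunnel_proto_supported(&caps, IPPROTO_IPIP) ? 0 : 1;
}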
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 973f90888b1f..ea934cd02448 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -446,6 +446,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_CREATE_UMEM: case MLX5_CMD_OP_DESTROY_UMEM: case MLX5_CMD_OP_ALLOC_MEMIC: + case MLX5_CMD_OP_MODIFY_XRQ: + case MLX5_CMD_OP_RELEASE_XRQ_ERROR: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@ -637,6 +639,8 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(DESTROY_UCTX); MLX5_COMMAND_STR_CASE(CREATE_UMEM); MLX5_COMMAND_STR_CASE(DESTROY_UMEM); + MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR); + MLX5_COMMAND_STR_CASE(MODIFY_XRQ); default: return "unknown command opcode"; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 8a4930c8bf62..2011eaf15cc5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -546,7 +546,7 @@ static void mlx5_fw_tracer_save_trace(struct mlx5_fw_tracer *tracer, trace_data->timestamp = timestamp; trace_data->lost = lost; trace_data->event_id = event_id; - strncpy(trace_data->msg, msg, TRACE_STR_MSG); + strscpy_pad(trace_data->msg, msg, TRACE_STR_MSG); tracer->st_arr.saved_traces_index = (tracer->st_arr.saved_traces_index + 1) & (SAVED_TRACES_NUM - 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 446792799125..8d76452cacdc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -163,6 +163,14 @@ enum mlx5e_rq_group { #define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g) }; +static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev) +{ + if (mlx5_lag_is_lacp_owner(mdev)) + return 1; + + return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS); +} + static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) { switch (wq_type) { @@ -705,6 +713,7 @@ struct mlx5e_channel { struct net_device *netdev; __be32 mkey_be; u8 num_tc; + u8 lag_port; /* XDP_REDIRECT */ struct mlx5e_xdpsq xdpsq; @@ -818,7 +827,7 @@ struct mlx5e_priv { struct mlx5e_rq drop_rq; struct mlx5e_channels channels; - u32 tisn[MLX5E_MAX_NUM_TC]; + u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC]; struct mlx5e_rqt indir_rqt; struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS]; @@ -1056,12 +1065,6 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); void mlx5e_tx_disable_queue(struct netdev_queue *txq); -static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) -{ - return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) && - MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); -} - static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) { return MLX5_CAP_ETH(mdev, swp) && @@ -1107,6 +1110,7 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn); void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); int mlx5e_create_tises(struct mlx5e_priv *priv); +void mlx5e_destroy_tises(struct mlx5e_priv *priv); int mlx5e_update_nic_rx(struct mlx5e_priv *priv); void mlx5e_update_carrier(struct mlx5e_priv *priv); int mlx5e_close(struct net_device 
*netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index ca2161b42c7f..68d593074f6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -95,9 +95,15 @@ struct mlx5e_tirc_config { enum mlx5e_tunnel_types { MLX5E_TT_IPV4_GRE, MLX5E_TT_IPV6_GRE, + MLX5E_TT_IPV4_IPIP, + MLX5E_TT_IPV6_IPIP, + MLX5E_TT_IPV4_IPV6, + MLX5E_TT_IPV6_IPV6, MLX5E_NUM_TUNNEL_TT, }; +bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev); + /* L3/L4 traffic type classifier */ struct mlx5e_ttc_table { struct mlx5e_flow_table ft; @@ -232,5 +238,8 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); +bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type); +bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev); + #endif /* __MLX5E_FLOW_STEER_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 7347d673f448..c5a9c20d7f00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1431,7 +1431,7 @@ static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev) return ret; } -static __u32 mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode) +static __u32 mlx5e_reformat_wol_mode_mlx5_to_linux(u8 mode) { __u32 ret = 0; @@ -1459,7 +1459,7 @@ static __u32 mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode) return ret; } -static u8 mlx5e_refomrat_wol_mode_linux_to_mlx5(__u32 mode) +static u8 mlx5e_reformat_wol_mode_linux_to_mlx5(__u32 mode) { u8 ret = 0; @@ -1505,7 +1505,7 @@ static void mlx5e_get_wol(struct net_device *netdev, if (err) return; - wol->wolopts = mlx5e_refomrat_wol_mode_mlx5_to_linux(mlx5_wol_mode); + wol->wolopts = mlx5e_reformat_wol_mode_mlx5_to_linux(mlx5_wol_mode); } static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -1521,7 +1521,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~wol_supported) return -EINVAL; - mlx5_wol_mode = mlx5e_refomrat_wol_mode_linux_to_mlx5(wol->wolopts); + mlx5_wol_mode = mlx5e_reformat_wol_mode_linux_to_mlx5(wol->wolopts); return mlx5_set_port_wol(mdev, mlx5_wol_mode); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 76cc10e44080..15b7f0f1427c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -747,8 +747,55 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = { .etype = ETH_P_IPV6, .proto = IPPROTO_GRE, }, + [MLX5E_TT_IPV4_IPIP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_IPIP, + }, + [MLX5E_TT_IPV6_IPIP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_IPIP, + }, + [MLX5E_TT_IPV4_IPV6] = { + .etype = ETH_P_IP, + .proto = IPPROTO_IPV6, + }, + [MLX5E_TT_IPV6_IPV6] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_IPV6, + }, + }; +bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type) +{ + switch (proto_type) { + case IPPROTO_GRE: + return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); + case IPPROTO_IPIP: + case IPPROTO_IPV6: + return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip); + default: + return false; + } +} + +bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) +{ + int tt; + + for (tt = 0; tt 
< MLX5E_NUM_TUNNEL_TT; tt++) { + if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto)) + return true; + } + return false; +} + +bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) +{ + return (mlx5e_any_tunnel_proto_supported(mdev) && + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); +} + static u8 mlx5e_etype_to_ipv(u16 ethertype) { if (ethertype == ETH_P_IP) @@ -838,6 +885,9 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = params->inner_ttc->ft.t; for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { + if (!mlx5e_tunnel_proto_supported(priv->mdev, + ttc_tunnel_rules[tt].proto)) + continue; rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, ttc_tunnel_rules[tt].etype, ttc_tunnel_rules[tt].proto); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8592b98d0e70..9ff28e2d72cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1442,7 +1442,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, return err; csp.tis_lst_sz = 1; - csp.tisn = c->priv->tisn[0]; /* tc = 0 */ + csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */ csp.cqn = sq->cq.mcq.cqn; csp.wq_ctrl = &sq->wq_ctrl; csp.min_inline_mode = sq->min_inline_mode; @@ -1692,7 +1692,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, for (tc = 0; tc < params->num_tc; tc++) { int txq_ix = c->ix + tc * priv->max_nch; - err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix, + err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, params, &cparam->sq, &c->sq[tc], tc); if (err) goto err_close_sqs; @@ -1926,6 +1926,13 @@ static void mlx5e_close_queues(struct mlx5e_channel *c) mlx5e_close_cq(&c->icosq.cq); } +static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix) +{ + u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 
0 : MLX5_CAP_GEN(mdev, vhca_id); + + return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev); +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_params *params, struct mlx5e_channel_param *cparam, @@ -1960,6 +1967,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; c->irq_desc = irq_to_desc(irq); + c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix); err = mlx5e_alloc_xps_cpumask(c, params); if (err) @@ -3179,39 +3187,58 @@ void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn) mlx5_core_destroy_tis(mdev, tisn); } +void mlx5e_destroy_tises(struct mlx5e_priv *priv) +{ + int tc, i; + + for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) + for (tc = 0; tc < priv->profile->max_tc; tc++) + mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); +} + +static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1; +} + int mlx5e_create_tises(struct mlx5e_priv *priv) { + int tc, i; int err; - int tc; - for (tc = 0; tc < priv->profile->max_tc; tc++) { - u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; - void *tisc; + for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) { + for (tc = 0; tc < priv->profile->max_tc; tc++) { + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; + void *tisc; - tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); + tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); - MLX5_SET(tisc, tisc, prio, tc << 1); + MLX5_SET(tisc, tisc, prio, tc << 1); - err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[tc]); - if (err) - goto err_close_tises; + if (mlx5e_lag_should_assign_affinity(priv->mdev)) + MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1); + + err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]); + if (err) + goto err_close_tises; + } } return 0; err_close_tises: - for (tc--; tc >= 0; tc--) - mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); + for (; i >= 0; i--) { + for (tc--; tc >= 0; tc--) + mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); + tc = priv->profile->max_tc; + } return err; } static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) { - int tc; - - for (tc = 0; tc < priv->profile->max_tc; tc++) - mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); + mlx5e_destroy_tises(priv); } static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv, @@ -4216,6 +4243,8 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, switch (proto) { case IPPROTO_GRE: + case IPPROTO_IPIP: + case IPPROTO_IPV6: return features; case IPPROTO_UDP: udph = udp_hdr(skb); @@ -4852,7 +4881,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) || - MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { + mlx5e_any_tunnel_proto_supported(mdev)) { netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; netdev->hw_enc_features |= NETIF_F_TSO6; @@ -4867,7 +4896,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; } - if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { + if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) { netdev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM; netdev->hw_enc_features |= NETIF_F_GSO_GRE | @@ -4876,6 +4905,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) NETIF_F_GSO_GRE_CSUM; } + if 
(mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) { + netdev->hw_features |= NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6; + netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6; + netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6; + } + netdev->hw_features |= NETIF_F_GSO_PARTIAL; netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4; netdev->hw_features |= NETIF_F_GSO_UDP_L4; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index e7ac6233037d..1623cd32f303 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1621,7 +1621,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_rep_uplink_priv *uplink_priv; - int tc, err; + int err; err = mlx5e_create_tises(priv); if (err) { @@ -1657,18 +1657,15 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) tc_esw_cleanup: mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht); destroy_tises: - for (tc = 0; tc < priv->profile->max_tc; tc++) - mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); + mlx5e_destroy_tises(priv); return err; } static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - int tc; - for (tc = 0; tc < priv->profile->max_tc; tc++) - mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); + mlx5e_destroy_tises(priv); if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { /* clean indirect TC block notifications */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 18e4c162256a..f1065e78086a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -369,17 +369,27 @@ static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv) } #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c) -static const struct counter_desc vnic_env_stats_desc[] = { +static const struct counter_desc vnic_env_stats_steer_desc[] = { { "rx_steer_missed_packets", VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) }, }; -#define NUM_VNIC_ENV_COUNTERS ARRAY_SIZE(vnic_env_stats_desc) +static const struct counter_desc vnic_env_stats_dev_oob_desc[] = { + { "dev_internal_queue_oob", + VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) }, +}; + +#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \ + (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \ + ARRAY_SIZE(vnic_env_stats_steer_desc) : 0) +#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \ + (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \ + ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0) static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv) { - return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ? 
- NUM_VNIC_ENV_COUNTERS : 0; + return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) + + NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); } static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data, @@ -387,12 +397,13 @@ static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data, { int i; - if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) - return idx; + for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vnic_env_stats_steer_desc[i].format); - for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++) + for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - vnic_env_stats_desc[i].format); + vnic_env_stats_dev_oob_desc[i].format); return idx; } @@ -401,12 +412,13 @@ static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data, { int i; - if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) - return idx; - - for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++) + for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++) data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out, - vnic_env_stats_desc, i); + vnic_env_stats_steer_desc, i); + + for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) + data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out, + vnic_env_stats_dev_oob_desc, i); return idx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 09d4c64b6e73..580c71cb9dfa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -324,10 +324,13 @@ err_buf: /** * mlx5_eq_enable - Enable EQ for receiving EQEs - * @dev - Device which owns the eq - * @eq - EQ to enable - * @nb - notifier call block - * mlx5_eq_enable - must be called after EQ is created in device. + * @dev : Device which owns the eq + * @eq : EQ to enable + * @nb : Notifier call block + * + * Must be called after EQ is created in device. + * + * @return: 0 if no error */ int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, struct notifier_block *nb) @@ -344,11 +347,12 @@ int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, EXPORT_SYMBOL(mlx5_eq_enable); /** - * mlx5_eq_disable - Enable EQ for receiving EQEs - * @dev - Device which owns the eq - * @eq - EQ to disable - * @nb - notifier call block - * mlx5_eq_disable - must be called before EQ is destroyed. + * mlx5_eq_disable - Disable EQ for receiving EQEs + * @dev : Device which owns the eq + * @eq : EQ to disable + * @nb : Notifier call block + * + * Must be called before EQ is destroyed. 
*/ void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, struct notifier_block *nb) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f0692407f617..30aae76b6a1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1413,7 +1413,7 @@ out: static bool element_type_supported(struct mlx5_eswitch *esw, int type) { - struct mlx5_core_dev *dev = esw->dev = esw->dev; + const struct mlx5_core_dev *dev = esw->dev; switch (type) { case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index b84a225bbe86..1e3381604b3d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -182,7 +182,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, } else { MLX5_SET(create_flow_table_in, in, flow_table_context.table_miss_action, - ns->def_miss_action); + ft->def_miss_action); } break; @@ -262,7 +262,7 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns, } else { MLX5_SET(modify_flow_table_in, in, flow_table_context.table_miss_action, - ns->def_miss_action); + ft->def_miss_action); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 3e99799bdb40..7bdec442f0ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -60,7 +60,8 @@ ADD_PRIO(num_prios_val, 0, num_levels_val, {},\ __VA_ARGS__)\ -#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\ +#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \ + .def_miss_action = def_miss_act,\ .children = (struct init_tree_node[]) {__VA_ARGS__},\ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ } @@ -131,33 +132,41 @@ static struct init_tree_node { int num_leaf_prios; int prio; int num_levels; + enum mlx5_flow_table_miss_action def_miss_action; } root_fs = { .type = FS_TYPE_NAMESPACE, .ar_size = 7, - .children = (struct init_tree_node[]) { - ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, - FS_CHAINING_CAPS, - ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, - BY_PASS_PRIO_NUM_LEVELS))), - ADD_PRIO(0, LAG_MIN_LEVEL, 0, - FS_CHAINING_CAPS, - ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS, - LAG_PRIO_NUM_LEVELS))), - ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))), - ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, - FS_CHAINING_CAPS, - ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS, - ETHTOOL_PRIO_NUM_LEVELS))), - ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS), - ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, - KERNEL_NIC_PRIO_NUM_LEVELS))), - ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, - FS_CHAINING_CAPS, - ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))), - ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))), + .children = (struct init_tree_node[]){ + ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + BY_PASS_PRIO_NUM_LEVELS))), + ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS, + LAG_PRIO_NUM_LEVELS))), + ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {}, + 
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, + OFFLOADS_MAX_FT))), + ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS, + ETHTOOL_PRIO_NUM_LEVELS))), + ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, + KERNEL_NIC_TC_NUM_LEVELS), + ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, + KERNEL_NIC_PRIO_NUM_LEVELS))), + ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, + LEFTOVERS_NUM_LEVELS))), + ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, + ANCHOR_NUM_LEVELS))), } }; @@ -167,8 +176,29 @@ static struct init_tree_node egress_root_fs = { .children = (struct init_tree_node[]) { ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, FS_CHAINING_CAPS_EGRESS, - ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + BY_PASS_PRIO_NUM_LEVELS))), + } +}; + +#define RDMA_RX_BYPASS_PRIO 0 +#define RDMA_RX_KERNEL_PRIO 1 +static struct init_tree_node rdma_rx_root_fs = { + .type = FS_TYPE_NAMESPACE, + .ar_size = 2, + .children = (struct init_tree_node[]) { + [RDMA_RX_BYPASS_PRIO] = + ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), + [RDMA_RX_KERNEL_PRIO] = + ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, + ADD_MULTIPLE_PRIO(1, 1))), } }; @@ -1014,6 +1044,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); log_table_sz = ft->max_fte ? 
ilog2(ft->max_fte) : 0; next_ft = find_next_chained_ft(fs_prio); + ft->def_miss_action = ns->def_miss_action; err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft); if (err) goto free_ft; @@ -2056,16 +2087,18 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, if (steering->sniffer_tx_root_ns) return &steering->sniffer_tx_root_ns->ns; return NULL; - case MLX5_FLOW_NAMESPACE_RDMA_RX: - if (steering->rdma_rx_root_ns) - return &steering->rdma_rx_root_ns->ns; - return NULL; default: break; } if (type == MLX5_FLOW_NAMESPACE_EGRESS) { root_ns = steering->egress_root_ns; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) { + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_BYPASS_PRIO; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) { + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_KERNEL_PRIO; } else { /* Must be NIC RX */ root_ns = steering->root_ns; prio = type; @@ -2155,7 +2188,8 @@ static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace return ns; } -static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) +static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio, + int def_miss_act) { struct mlx5_flow_namespace *ns; @@ -2164,6 +2198,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) return ERR_PTR(-ENOMEM); fs_init_namespace(ns); + ns->def_miss_action = def_miss_act; tree_init_node(&ns->node, NULL, del_sw_ns); tree_add_node(&ns->node, &prio->node); list_add_tail(&ns->node.list, &prio->node.children); @@ -2230,7 +2265,7 @@ static int init_root_tree_recursive(struct mlx5_flow_steering *steering, base = &fs_prio->node; } else if (init_node->type == FS_TYPE_NAMESPACE) { fs_get_obj(fs_prio, fs_parent_node); - fs_ns = fs_create_namespace(fs_prio); + fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action); if (IS_ERR(fs_ns)) return PTR_ERR(fs_ns); base = &fs_ns->node; @@ -2494,18 +2529,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering) static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering) { - struct fs_prio *prio; + int err; steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX); if (!steering->rdma_rx_root_ns) return -ENOMEM; - steering->rdma_rx_root_ns->def_miss_action = - MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN; + err = init_root_tree(steering, &rdma_rx_root_fs, + &steering->rdma_rx_root_ns->ns.node); + if (err) + goto out_err; - /* Create single prio */ - prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1); - return PTR_ERR_OR_ZERO(prio); + set_prio_attrs(steering->rdma_rx_root_ns); + + return 0; + +out_err: + cleanup_root_ns(steering->rdma_rx_root_ns); + steering->rdma_rx_root_ns = NULL; + return err; } static int init_fdb_root_ns(struct mlx5_flow_steering *steering) { @@ -2543,7 +2585,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) } for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) { - ns = fs_create_namespace(maj_prio); + ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF); if (IS_ERR(ns)) { err = PTR_ERR(ns); goto out_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index c1252d6be0ef..0d16b4b5ab83 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -145,6 +145,7 @@ struct mlx5_flow_table { struct list_head fwd_rules; u32 flags; struct rhltable fgs_hash; + enum 
mlx5_flow_table_miss_action def_miss_action; }; struct mlx5_ft_underlay_qp { @@ -191,6 +192,7 @@ struct fs_prio { struct mlx5_flow_namespace { /* parent == NULL => root ns */ struct fs_node node; + enum mlx5_flow_table_miss_action def_miss_action; }; struct mlx5_flow_group_mask { @@ -219,7 +221,6 @@ struct mlx5_flow_root_namespace { struct mutex chain_lock; struct list_head underlay_qpns; const struct mlx5_flow_cmds *cmds; - enum mlx5_flow_table_miss_action def_miss_action; }; int mlx5_init_fc_stats(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 1a2560e3bf7c..3ed8ab2d703d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -279,7 +279,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv) return err; } - err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0]); + err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0][0]); if (err) { mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); goto err_destroy_underlay_qp; @@ -296,7 +296,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv) { struct mlx5i_priv *ipriv = priv->ppriv; - mlx5e_destroy_tis(priv->mdev, priv->tisn[0]); + mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]); mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index c5a491e22e55..96e64187c089 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -210,7 +210,7 @@ static int mlx5i_pkey_open(struct net_device *netdev) goto err_unint_underlay_qp; } - err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0]); + err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0][0]); if (err) { mlx5_core_warn(mdev, "create child tis failed, %d\n", err); goto err_remove_rx_uderlay_qp; @@ -228,7 +228,7 @@ static int mlx5i_pkey_open(struct net_device *netdev) return 0; err_clear_state_opened_flag: - mlx5e_destroy_tis(mdev, epriv->tisn[0]); + mlx5e_destroy_tis(mdev, epriv->tisn[0][0]); err_remove_rx_uderlay_qp: mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); err_unint_underlay_qp: @@ -257,7 +257,7 @@ static int mlx5i_pkey_close(struct net_device *netdev) mlx5i_uninit_underlay_qp(priv); mlx5e_deactivate_priv_channels(priv); mlx5e_close_channels(&priv->channels); - mlx5e_destroy_tis(mdev, priv->tisn[0]); + mlx5e_destroy_tis(mdev, priv->tisn[0][0]); unlock: mutex_unlock(&priv->state_lock); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c index ea1d4d26ece0..3fc575d1c3ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c @@ -2,6 +2,7 @@ // Copyright (c) 2019 Mellanox Technologies. 
#include "mlx5_core.h" +#include "lib/mlx5.h" int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, void *key, u32 sz_bytes, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 61388ca7233b..dee1a8658c87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -496,6 +496,12 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev) ODP_CAP_SET_MAX(dev, xrc_odp_caps.write); ODP_CAP_SET_MAX(dev, xrc_odp_caps.read); ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic); + ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive); + ODP_CAP_SET_MAX(dev, dc_odp_caps.send); + ODP_CAP_SET_MAX(dev, dc_odp_caps.receive); + ODP_CAP_SET_MAX(dev, dc_odp_caps.write); + ODP_CAP_SET_MAX(dev, dc_odp_caps.read); + ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic); if (do_set) err = set_caps(dev, set_ctx, set_sz, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index b8ba74de9555..c3aea4cc2fff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -53,7 +53,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) common = radix_tree_lookup(&table->tree, rsn); if (common) - atomic_inc(&common->refcount); + refcount_inc(&common->refcount); spin_unlock_irqrestore(&table->lock, flags); @@ -62,7 +62,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common) { - if (atomic_dec_and_test(&common->refcount)) + if (refcount_dec_and_test(&common->refcount)) complete(&common->free); } @@ -162,7 +162,7 @@ static int rsc_event_notifier(struct notifier_block *nb, common = mlx5_get_rsc(table, rsn); if (!common) { - mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn); + mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn); return NOTIFY_OK; } @@ -209,7 +209,7 @@ static int create_resource_common(struct mlx5_core_dev *dev, if (err) return err; - atomic_set(&qp->common.refcount, 1); + refcount_set(&qp->common.refcount, 1); init_completion(&qp->common.free); qp->pid = current->pid; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c index 17ce9dd56b13..18af6981e0be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c @@ -51,7 +51,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) return -ENOMEM; } - ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX); + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL); if (!ns) { mlx5_core_err(dev, "Failed to get RDMA RX namespace"); err = -EOPNOTSUPP; |