-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h                  94
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c            161
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h              1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c    77
4 files changed, 293 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0c5237264e3e..bb77e2207804 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1044,6 +1044,92 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
mlxsw_reg_sftr_port_mask_set(payload, port, 1);
}
+/* SFDF - Switch Filtering DB Flush
+ * --------------------------------
+ * The switch filtering DB flush register is used to flush the FDB.
+ * Note that FDB notifications are flushed as well.
+ */
+#define MLXSW_REG_SFDF_ID 0x2013
+#define MLXSW_REG_SFDF_LEN 0x14
+
+static const struct mlxsw_reg_info mlxsw_reg_sfdf = {
+ .id = MLXSW_REG_SFDF_ID,
+ .len = MLXSW_REG_SFDF_LEN,
+};
+
+/* reg_sfdf_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8);
+
+enum mlxsw_reg_sfdf_flush_type {
+ MLXSW_REG_SFDF_FLUSH_PER_SWID,
+ MLXSW_REG_SFDF_FLUSH_PER_FID,
+ MLXSW_REG_SFDF_FLUSH_PER_PORT,
+ MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
+ MLXSW_REG_SFDF_FLUSH_PER_LAG,
+ MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
+};
+
+/* reg_sfdf_flush_type
+ * Flush type.
+ * 0 - All SWID dynamic entries are flushed.
+ * 1 - All FID dynamic entries are flushed.
+ * 2 - All dynamic entries pointing to port are flushed.
+ * 3 - All FID dynamic entries pointing to port are flushed.
+ * 4 - All dynamic entries pointing to LAG are flushed.
+ * 5 - All FID dynamic entries pointing to LAG are flushed.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
+
+/* reg_sfdf_flush_static
+ * Static.
+ * 0 - Flush only dynamic entries.
+ * 1 - Flush both dynamic and static entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1);
+
+static inline void mlxsw_reg_sfdf_pack(char *payload,
+ enum mlxsw_reg_sfdf_flush_type type)
+{
+ MLXSW_REG_ZERO(sfdf, payload);
+ mlxsw_reg_sfdf_flush_type_set(payload, type);
+ mlxsw_reg_sfdf_flush_static_set(payload, true);
+}
+
+/* reg_sfdf_fid
+ * FID to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16);
+
+/* reg_sfdf_system_port
+ * Port to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16);
+
+/* reg_sfdf_port_fid_system_port
+ * Port to flush, pointed to by FID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16);
+
+/* reg_sfdf_lag_id
+ * LAG ID to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10);
+
+/* reg_sfdf_lag_fid_lag_id
+ * LAG ID to flush, pointed to by FID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10);
+
/* SLDR - Switch LAG Descriptor Register
* -----------------------------------------
* The switch LAG descriptor register is populated by LAG descriptors.
@@ -1701,20 +1787,20 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
* Module number.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
/* reg_pmlp_tx_lane
* Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
/* reg_pmlp_rx_lane
* Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
* equal to Tx lane.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
{
@@ -3121,6 +3207,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SFGC";
case MLXSW_REG_SFTR_ID:
return "SFTR";
+ case MLXSW_REG_SFDF_ID:
+ return "SFDF";
case MLXSW_REG_SLDR_ID:
return "SLDR";
case MLXSW_REG_SLCR_ID:
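
For reference, a minimal sketch (not part of the patch) of how the new SFDF helpers fit together. The function name example_fdb_flush_by_fid is hypothetical, but every call it makes -- mlxsw_reg_sfdf_pack(), the generated mlxsw_reg_sfdf_fid_set(), MLXSW_REG(sfdf) and mlxsw_reg_write() -- comes from the register definitions above and mirrors the flush helpers added to spectrum.c below:

static int example_fdb_flush_by_fid(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	/* Zero the payload and select the per-FID flush type; the pack
	 * helper also sets flush_static, so static entries are flushed
	 * along with the dynamic ones.
	 */
	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);

	/* Issue the register write through the core. */
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
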
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index ce6845d534a8..217856bdd400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1979,6 +1979,115 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.profile = &mlxsw_sp_config_profile,
};
+static int
+mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+ mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
+ mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+ mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
+ mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
+ mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
+ mlxsw_sp_port->local_port);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+ mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
+ mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+ mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
+ mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
+ mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err, last_err = 0;
+ u16 vid;
+
+ for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
+ err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
+ if (err)
+ last_err = err;
+ }
+
+ return last_err;
+}
+
+static int
+__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err, last_err = 0;
+ u16 vid;
+
+ for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
+ err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
+ if (err)
+ last_err = err;
+ }
+
+ return last_err;
+}
+
+static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ if (!list_empty(&mlxsw_sp_port->vports_list))
+ if (mlxsw_sp_port->lagged)
+ return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
+ else
+ return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
+ else
+ if (mlxsw_sp_port->lagged)
+ return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
+ else
+ return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
+}
+
+static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
+ u16 fid = mlxsw_sp_vfid_to_fid(vfid);
+
+ if (mlxsw_sp_vport->lagged)
+ return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
+ fid);
+ else
+ return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
+}
+
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
@@ -2006,10 +2115,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool flush_fdb)
{
struct net_device *dev = mlxsw_sp_port->dev;
+ if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
+ netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
+
mlxsw_sp_port->learning = 0;
mlxsw_sp_port->learning_sync = 0;
mlxsw_sp_port->uc_flood = 0;
@@ -2200,10 +2313,15 @@ err_col_port_enable:
return err;
}
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *br_dev,
+ bool flush_fdb);
+
static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_upper *lag;
u16 lag_id = mlxsw_sp_port->lag_id;
int err;
@@ -2220,7 +2338,32 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
+ /* In case we leave a LAG device that has bridges built on top,
+ * then their teardown sequence is never issued and we need to
+ * invoke the necessary cleanup routines ourselves.
+ */
+ list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
+ vport.list) {
+ struct net_device *br_dev;
+
+ if (!mlxsw_sp_vport->bridged)
+ continue;
+
+ br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
+ }
+
+ if (mlxsw_sp_port->bridged) {
+ mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
+
+ if (lag->ref_count == 1)
+ mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+ }
+
if (lag->ref_count == 1) {
+ if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
+ netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
if (err)
return err;
@@ -2272,9 +2415,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev);
-
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *vlan_dev)
{
@@ -2312,7 +2452,7 @@ static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *br_dev;
br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev);
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
}
mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
@@ -2374,7 +2514,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
}
mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
} else {
- err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+ err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+ true);
mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
if (err) {
netdev_err(dev, "Failed to leave bridge\n");
@@ -2541,7 +2682,8 @@ static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev)
+ struct net_device *br_dev,
+ bool flush_fdb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
@@ -2604,6 +2746,9 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
goto err_vport_flood_set;
}
+ if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
+ netdev_err(dev, "Failed to flush FDB\n");
+
/* Switch between the vFIDs and destroy the old one if needed. */
new_vfid->nr_vports++;
mlxsw_sp_vport->vport.vfid = new_vfid;
@@ -2777,7 +2922,7 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
if (!mlxsw_sp_vport)
return NOTIFY_DONE;
err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
- upper_dev);
+ upper_dev, true);
if (err) {
netdev_err(dev, "Failed to leave bridge\n");
return NOTIFY_BAD;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a23dc610d259..df279fc81c2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -254,5 +254,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
bool set, bool only_uc);
+void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 45479ef5bcf4..f2ab7cd09cf3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -124,14 +124,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
int err;
switch (state) {
- case BR_STATE_DISABLED: /* fall-through */
case BR_STATE_FORWARDING:
spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
break;
- case BR_STATE_LISTENING: /* fall-through */
case BR_STATE_LEARNING:
spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
break;
+ case BR_STATE_LISTENING: /* fall-through */
+ case BR_STATE_DISABLED: /* fall-through */
case BR_STATE_BLOCKING:
spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
break;
@@ -936,6 +936,14 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
vlan->vid_begin, vlan->vid_end, false);
}
+void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ u16 vid;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+ __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
+}
+
static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_fdb *fdb)
@@ -1040,10 +1048,12 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb)
+ switchdev_obj_dump_cb_t *cb,
+ struct net_device *orig_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u16 vport_vid = 0, vport_fid = 0;
+ struct mlxsw_sp_port *tmp;
+ u16 vport_fid = 0;
char *sfd_pl;
char mac[ETH_ALEN];
u16 fid;
@@ -1064,7 +1074,6 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
vport_fid = mlxsw_sp_vfid_to_fid(tmp);
- vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
}
mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
@@ -1088,12 +1097,13 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
&local_port);
if (local_port == mlxsw_sp_port->local_port) {
- if (vport_fid && vport_fid != fid)
- continue;
- else if (vport_fid)
- fdb->vid = vport_vid;
- else
+ if (vport_fid && vport_fid == fid)
+ fdb->vid = 0;
+ else if (!vport_fid &&
+ !mlxsw_sp_fid_is_vfid(fid))
fdb->vid = fid;
+ else
+ continue;
ether_addr_copy(fdb->addr, mac);
fdb->ndm_state = NUD_REACHABLE;
err = cb(&fdb->obj);
@@ -1104,14 +1114,22 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
mac, &fid, &lag_id);
- if (mlxsw_sp_port ==
- mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
- if (vport_fid && vport_fid != fid)
+ tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
+ if (tmp && tmp->local_port ==
+ mlxsw_sp_port->local_port) {
+ /* LAG records can only point to LAG
+ * devices or VLAN devices on top.
+ */
+ if (!netif_is_lag_master(orig_dev) &&
+ !is_vlan_dev(orig_dev))
continue;
- else if (vport_fid)
- fdb->vid = vport_vid;
- else
+ if (vport_fid && vport_fid == fid)
+ fdb->vid = 0;
+ else if (!vport_fid &&
+ !mlxsw_sp_fid_is_vfid(fid))
fdb->vid = fid;
+ else
+ continue;
ether_addr_copy(fdb->addr, mac);
fdb->ndm_state = NUD_REACHABLE;
err = cb(&fdb->obj);
@@ -1176,7 +1194,8 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_FDB(obj), cb);
+ SWITCHDEV_OBJ_PORT_FDB(obj), cb,
+ obj->orig_dev);
break;
default:
err = -EOPNOTSUPP;
@@ -1194,14 +1213,14 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
.switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
};
-static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
- bool adding, char *mac, u16 vid,
+static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
+ char *mac, u16 vid,
struct net_device *dev)
{
struct switchdev_notifier_fdb_info info;
unsigned long notifier_type;
- if (learning && learning_sync) {
+ if (learning_sync) {
info.addr = mac;
info.vid = vid;
notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
@@ -1237,7 +1256,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
goto just_remove;
}
- vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+ vid = 0;
/* Override the physical port with the vPort. */
mlxsw_sp_port = mlxsw_sp_vport;
} else {
@@ -1257,8 +1276,7 @@ do_fdb_op:
if (!do_notification)
return;
- mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
- mlxsw_sp_port->learning_sync,
+ mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
adding, mac, vid, mlxsw_sp_port->dev);
return;
@@ -1273,6 +1291,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
bool adding)
{
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct net_device *dev;
char mac[ETH_ALEN];
u16 lag_vid = 0;
u16 lag_id;
@@ -1298,11 +1317,13 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
- vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
- lag_vid = vid;
+ lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+ dev = mlxsw_sp_vport->dev;
+ vid = 0;
/* Override the physical port with the vPort. */
mlxsw_sp_port = mlxsw_sp_vport;
} else {
+ dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
vid = fid;
}
@@ -1319,10 +1340,8 @@ do_fdb_op:
if (!do_notification)
return;
- mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
- mlxsw_sp_port->learning_sync,
- adding, mac, vid,
- mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
+ mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
+ vid, dev);
return;
just_remove: