author     Petr Machata <petrm@nvidia.com>          2020-09-16 09:35:23 +0300
committer  David S. Miller <davem@davemloft.net>    2020-09-16 15:19:30 -0700
commit     7ace2c36aa8e797a4e163fda1f46433a86799b37 (patch)
tree       10e1ac3cc4cc46c757639b34d10ab90ad40d62db /drivers/net/ethernet
parent     e9c97e0ece6b89cfe1cb33f171c62917522c4ce3 (diff)
mlxsw: spectrum: Move here the three-step headroom configuration from DCB
The ETS handler performs the headroom configuration in three steps: first it
resizes the buffers and adds any new ones. Then it redirects priorities to the
new buffers. And finally it sets the size of the now-unused buffers to zero.
This way no packet drops are introduced.

This sort of careful approach will also be useful for configuring port buffer
sizes and priority map by hand, through dcbnl_setbuffer. Therefore move the
code from the DCB handler to the generic headroom function.

Signed-off-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
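
[Editor's note] For illustration only, below is a minimal standalone C sketch of the
make-before-break ordering described above: (1) configure grown/new buffers, (2) repoint
priorities, (3) shrink the now-unused buffers. The struct layout, counts, and helper names
(sketch_hdroom, write_buf_sizes, write_priomap, hdroom_configure) are hypothetical
stand-ins, not the driver's mlxsw_sp_hdroom structures or register writes; the real
implementation is __mlxsw_sp_hdroom_configure() in the diff below.

#include <stdio.h>
#include <string.h>

#define NUM_BUFS	4
#define NUM_PRIOS	8

struct sketch_hdroom {
	unsigned int buf_size[NUM_BUFS];	/* buffer sizes, in cells */
	unsigned int prio_buf[NUM_PRIOS];	/* priority -> buffer index */
};

/* Stand-ins for device writes; here they just update the "hardware" copy. */
static void write_buf_sizes(struct sketch_hdroom *hw, const unsigned int *sizes)
{
	memcpy(hw->buf_size, sizes, sizeof(hw->buf_size));
}

static void write_priomap(struct sketch_hdroom *hw, const unsigned int *prios)
{
	memcpy(hw->prio_buf, prios, sizeof(hw->prio_buf));
}

static void hdroom_configure(struct sketch_hdroom *hw, const struct sketch_hdroom *want)
{
	struct sketch_hdroom tmp = *hw;
	int i;

	/* Step 1: configure every buffer that is non-zero in the target, but
	 * keep the current size of buffers the target zeroes out, since
	 * traffic may still be directed at them.
	 */
	for (i = 0; i < NUM_BUFS; i++)
		if (want->buf_size[i])
			tmp.buf_size[i] = want->buf_size[i];
	write_buf_sizes(hw, tmp.buf_size);

	/* Step 2: redirect priorities to their new buffers. */
	write_priomap(hw, want->prio_buf);

	/* Step 3: nothing points at the unused buffers anymore; shrink them. */
	write_buf_sizes(hw, want->buf_size);
}

int main(void)
{
	struct sketch_hdroom hw = {
		.buf_size = { 100, 100, 0, 0 },
		.prio_buf = { 0, 0, 0, 0, 1, 1, 1, 1 },
	};
	struct sketch_hdroom want = {
		.buf_size = { 0, 100, 100, 0 },
		.prio_buf = { 1, 1, 1, 1, 2, 2, 2, 2 },
	};
	int i;

	hdroom_configure(&hw, &want);
	for (i = 0; i < NUM_BUFS; i++)
		printf("buf %d: %u cells\n", i, hw.buf_size[i]);
	return 0;
}
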
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c      61
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c  61
2 files changed, 61 insertions(+), 61 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 48e48c398142..1f0e930d8a50 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -718,6 +718,30 @@ static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port
 	return 0;
 }
 
+static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port,
+					     const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+	char pptb_pl[MLXSW_REG_PPTB_LEN];
+	bool dirty;
+	int prio;
+	int err;
+
+	dirty = memcmp(&mlxsw_sp_port->hdroom->prios, &hdroom->prios, sizeof(hdroom->prios));
+	if (!dirty && !force)
+		return 0;
+
+	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
+	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, hdroom->prios.prio[prio].buf_idx);
+
+	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
+	if (err)
+		return err;
+
+	mlxsw_sp_port->hdroom->prios = hdroom->prios;
+	return 0;
+}
+
 static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
 				     const struct mlxsw_sp_hdroom *hdroom)
 {
@@ -735,17 +759,50 @@ static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
 static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
 				       const struct mlxsw_sp_hdroom *hdroom, bool force)
 {
+	struct mlxsw_sp_hdroom orig_hdroom;
+	struct mlxsw_sp_hdroom tmp_hdroom;
 	int err;
+	int i;
+
+	/* Port buffers need to be configured in three steps. First, all buffers
+	 * with non-zero size are configured. Then, prio-to-buffer map is
+	 * updated, allowing traffic to flow to the now non-zero buffers.
+	 * Finally, zero-sized buffers are configured, because now no traffic
+	 * should be directed to them anymore. This way, in a non-congested
+	 * system, no packet drops are introduced by the reconfiguration.
+	 */
 
-	if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
+	orig_hdroom = *mlxsw_sp_port->hdroom;
+	tmp_hdroom = orig_hdroom;
+	for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+		if (hdroom->bufs.buf[i].size_cells)
+			tmp_hdroom.bufs.buf[i] = hdroom->bufs.buf[i];
+	}
+
+	if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, &tmp_hdroom) ||
+	    !mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
 		return -ENOBUFS;
 
-	err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, false);
+	err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, force);
 	if (err)
 		return err;
 
+	err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom, force);
+	if (err)
+		goto err_configure_priomap;
+
+	err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, false);
+	if (err)
+		goto err_configure_buffers;
+
 	*mlxsw_sp_port->hdroom = *hdroom;
 	return 0;
+
+err_configure_buffers:
+	mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
+err_configure_priomap:
+	mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &orig_hdroom, false);
+	return err;
 }
 
 int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 098432c881c2..d9a556dbe85e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -64,85 +64,28 @@ static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
 	return 0;
 }
 
-static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port,
-					     const struct mlxsw_sp_hdroom *hdroom, bool force)
-{
-	char pptb_pl[MLXSW_REG_PPTB_LEN];
-	bool dirty;
-	int prio;
-	int err;
-
-	dirty = memcmp(&mlxsw_sp_port->hdroom->prios, &hdroom->prios, sizeof(hdroom->prios));
-	if (!dirty && !force)
-		return 0;
-
-	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
-	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
-		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, hdroom->prios.prio[prio].buf_idx);
-
-	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
-	if (err)
-		return err;
-
-	mlxsw_sp_port->hdroom->prios = hdroom->prios;
-	return 0;
-}
-
 static int mlxsw_sp_port_headroom_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
 					  struct ieee_ets *ets)
 {
 	struct net_device *dev = mlxsw_sp_port->dev;
-	struct mlxsw_sp_hdroom orig_hdroom;
-	struct mlxsw_sp_hdroom tmp_hdroom;
 	struct mlxsw_sp_hdroom hdroom;
 	int prio;
 	int err;
-	int i;
 
-	orig_hdroom = *mlxsw_sp_port->hdroom;
-
-	hdroom = orig_hdroom;
+	hdroom = *mlxsw_sp_port->hdroom;
 	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
 		hdroom.prios.prio[prio].ets_buf_idx = ets->prio_tc[prio];
 	mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
 	mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
 	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
 
-	/* Create the required PGs, but don't destroy existing ones, as
-	 * traffic is still directed to them.
-	 */
-	tmp_hdroom = hdroom;
-	for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
-		if (!tmp_hdroom.bufs.buf[i].size_cells)
-			tmp_hdroom.bufs.buf[i].size_cells =
-				mlxsw_sp_port->hdroom->bufs.buf[i].size_cells;
-	}
-
-	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &tmp_hdroom);
+	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
 	if (err) {
 		netdev_err(dev, "Failed to configure port's headroom\n");
 		return err;
 	}
 
-	err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &hdroom, false);
-	if (err) {
-		netdev_err(dev, "Failed to set PG-priority mapping\n");
-		goto err_port_prio_pg_map;
-	}
-
-	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
-	if (err) {
-		netdev_warn(dev, "Failed to remove unused PGs\n");
-		goto err_configure_buffers;
-	}
-
 	return 0;
-
-err_configure_buffers:
-	mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
-err_port_prio_pg_map:
-	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
-	return err;
 }
 
 static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,