author		Geert Uytterhoeven <geert+renesas@glider.be>	2020-10-01 12:10:07 +0200
committer	David S. Miller <davem@davemloft.net>	2020-10-01 12:53:31 -0700
commit		ce19a9eb53be2fe458cad850e8e70c39cd38cca7 (patch)
tree		6cb4f3743ca6e8904903cdbcad23e1e71384a2c9
parent		d7adf6331189cbe9d73729052a9ff14fed802667 (diff)
ravb: Split delay handling in parsing and applying
Currently, full delay handling is done in both the probe and resume paths.
Split it in two parts, so the resume path doesn't have to redo the parsing
part over and over again.

Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Reviewed-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
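[Editorial note, not part of the patch] The payoff is on the PM side: the resume path can keep calling only ravb_set_delay_mode(), which after this split just replays the cached flags into APSR instead of re-deriving them from the phy-mode. A minimal sketch of that call site, with the surrounding resume logic elided and assuming the existing ravb_resume() structure:

	/* Sketch only (not part of this patch): how the resume path benefits.
	 * ravb_parse_delay_mode() has already filled priv->rxcidm/txcidm at
	 * probe time, so resume only needs to replay them into APSR.
	 */
	static int ravb_resume(struct device *dev)
	{
		struct net_device *ndev = dev_get_drvdata(dev);
		struct ravb_private *priv = netdev_priv(ndev);

		/* ... hardware re-initialisation elided ... */

		if (priv->chip_id != RCAR_GEN2)
			ravb_set_delay_mode(ndev);	/* no re-parsing */

		/* ... */
		return 0;
	}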
-rw-r--r--	drivers/net/ethernet/renesas/ravb.h		4
-rw-r--r--	drivers/net/ethernet/renesas/ravb_main.c	21
2 files changed, 19 insertions, 6 deletions
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 9f88b5db4f89..e5ca12ce93c7 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1036,7 +1036,9 @@ struct ravb_private {
 	unsigned no_avb_link:1;
 	unsigned avb_link_active_low:1;
 	unsigned wol_enabled:1;
-	int num_tx_desc;	/* TX descriptors per packet */
+	unsigned rxcidm:1;	/* RX Clock Internal Delay Mode */
+	unsigned txcidm:1;	/* TX Clock Internal Delay Mode */
+	int num_tx_desc;	/* TX descriptors per packet */
 };
 
 static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index f684296df871..0a5608173697 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1999,23 +1999,32 @@ static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
 };
 
 /* Set tx and rx clock internal delay modes */
-static void ravb_set_delay_mode(struct net_device *ndev)
+static void ravb_parse_delay_mode(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	int set = 0;
 
 	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
 	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
-		set |= APSR_DM_RDM;
+		priv->rxcidm = 1;
 
 	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
 	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
 		if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
 			  "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
 			  phy_modes(priv->phy_interface)))
-			set |= APSR_DM_TDM;
+			priv->txcidm = 1;
 	}
+}
 
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	u32 set = 0;
+
+	if (priv->rxcidm)
+		set |= APSR_DM_RDM;
+	if (priv->txcidm)
+		set |= APSR_DM_TDM;
 	ravb_modify(ndev, APSR, APSR_DM, set);
 }
 
@@ -2148,8 +2157,10 @@ static int ravb_probe(struct platform_device *pdev)
 	/* Request GTI loading */
 	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
 
-	if (priv->chip_id != RCAR_GEN2)
+	if (priv->chip_id != RCAR_GEN2) {
+		ravb_parse_delay_mode(ndev);
 		ravb_set_delay_mode(ndev);
+	}
 
 	/* Allocate descriptor base address table */
 	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
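[Editorial note, not part of the patch] Design observation on the split above: the parsed result is cached as two single-bit fields in struct ravb_private rather than as a ready-made APSR value, so ravb_set_delay_mode() remains the only place that knows the APSR_DM_RDM/APSR_DM_TDM register encoding, while the phy-mode check and the soc_device_match() quirk WARN in ravb_parse_delay_mode() run exactly once, at probe time.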