author	David Woodhouse <dwmw2@infradead.org>	2015-09-24 11:38:22 +0100
committer	David S. Miller <davem@davemloft.net>	2015-09-26 22:38:34 -0700
commit	8b7a7048220f86547db31de0abe1ea6dd2cfa892 (patch)
tree	d2fc6cc721822b02e326569b2d011bd906a6842b /drivers
parent	5a58f227790faded5a3ef6075f3ddd65093e0f86 (diff)
8139cp: Fix GSO MSS handling
When fixing the TSO support I noticed we just mask ->gso_size with the
MSSMask value and don't care about the consequences.
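To make the consequence concrete, here is a small standalone sketch (ordinary userspace C, not driver code; the MSSShift/MSSMask values are copied from the enum in the diff below, and the 2048-byte gso_size is just a hypothetical out-of-range input) of what silently masking an over-limit MSS does to the value programmed into the descriptor:

```c
#include <stdio.h>

/* Field layout copied from the driver's Tx descriptor enum (see diff below). */
#define MSSShift	16
#define MSSMask		0x7ff	/* 11-bit MSS field */

int main(void)
{
	unsigned int gso_size = 2048;			/* hypothetical value > MSSMask */
	unsigned int masked   = gso_size & MSSMask;	/* silently truncated to 0 */
	unsigned int opts1    = masked << MSSShift;	/* descriptor carries a bogus MSS */

	printf("gso_size=%u -> masked MSS=%u -> opts1 MSS field=0x%x\n",
	       gso_size, masked, (opts1 >> MSSShift) & MSSMask);
	return 0;
}
```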
Provide a .ndo_features_check() method which drops the NETIF_F_TSO
feature for any skb which would exceed the maximum, and thus forces it
to be segmented by software.
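As a rough sketch of the mechanism (assumed names; the actual 8139cp implementation is in the diff below), an .ndo_features_check handler is consulted per skb and may clear feature bits for that packet only; clearing NETIF_F_TSO makes the core segment the skb in software before it ever reaches the driver's xmit path:

```c
#include <linux/netdevice.h>
#include <linux/if_vlan.h>

#define MY_HW_MSS_MAX	0x7ff	/* assumed hardware MSS limit for this sketch */

static netdev_features_t my_features_check(struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	/* MSS too large for the hardware: drop TSO for this skb only,
	 * so the stack falls back to software segmentation.
	 */
	if (skb_shinfo(skb)->gso_size > MY_HW_MSS_MAX)
		features &= ~NETIF_F_TSO;

	return vlan_features_check(skb, features);
}

static const struct net_device_ops my_netdev_ops = {
	/* ... the driver's other callbacks ... */
	.ndo_features_check	= my_features_check,
};
```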
Then we can stop the masking in cp_start_xmit(), and just WARN if the
maximum is exceeded, which should now never happen.
Finally, Francois Romieu noticed that we didn't even have the right
value for MSSMask anyway; it should be 0x7ff (11 bits) not 0xfff.
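The arithmetic behind the mask fix, using the bit definitions visible in the enum below: an 11-bit field holds values 0..2047, so its mask is (1 << 11) - 1 = 0x7ff, while 0xfff is a 12-bit mask whose extra bit lands on bit 27 after the 16-bit shift, i.e. on the LargeSend flag. A tiny standalone check:

```c
#include <stdio.h>

int main(void)
{
	unsigned int mask11 = (1u << 11) - 1;	/* 0x7ff: correct 11-bit MSS mask */
	unsigned int mask12 = (1u << 12) - 1;	/* 0xfff: one bit too wide */

	printf("11-bit mask = 0x%x (max MSS %u)\n", mask11, mask11);
	printf("12-bit mask = 0x%x; its top bit shifted by 16 is bit %u (LargeSend is bit 27)\n",
	       mask12, 11 + 16);
	return 0;
}
```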
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/realtek/8139cp.c	20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e5173f33428b..deae10d7426d 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -175,7 +175,7 @@ enum {
 	LastFrag	= (1 << 28), /* Final segment of a packet */
 	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
 	MSSShift	= 16,	     /* MSS value position */
-	MSSMask		= 0xfff,     /* MSS value: 11 bits */
+	MSSMask		= 0x7ff,     /* MSS value: 11 bits */
 	TxError		= (1 << 23), /* Tx error summary */
 	RxError		= (1 << 20), /* Rx error summary */
 	IPCS		= (1 << 18), /* Calculate IP checksum */
@@ -754,10 +754,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 	mss = skb_shinfo(skb)->gso_size;
 
+	if (mss > MSSMask) {
+		WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
+			  mss);
+		goto out_dma_error;
+	}
+
 	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
 	opts1 = DescOwn;
 	if (mss)
-		opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
+		opts1 |= LargeSend | (mss << MSSShift);
 	else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		const struct iphdr *ip = ip_hdr(skb);
 		if (ip->protocol == IPPROTO_TCP)
@@ -1852,6 +1858,15 @@ static void cp_set_d3_state (struct cp_private *cp)
 	pci_set_power_state (cp->pdev, PCI_D3hot);
 }
 
+static netdev_features_t cp_features_check(struct sk_buff *skb,
+					   struct net_device *dev,
+					   netdev_features_t features)
+{
+	if (skb_shinfo(skb)->gso_size > MSSMask)
+		features &= ~NETIF_F_TSO;
+
+	return vlan_features_check(skb, features);
+}
 static const struct net_device_ops cp_netdev_ops = {
 	.ndo_open		= cp_open,
 	.ndo_stop		= cp_close,
@@ -1864,6 +1879,7 @@ static const struct net_device_ops cp_netdev_ops = {
 	.ndo_tx_timeout		= cp_tx_timeout,
 	.ndo_set_features	= cp_set_features,
 	.ndo_change_mtu		= cp_change_mtu,
+	.ndo_features_check	= cp_features_check,
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= cp_poll_controller,