author     Jeff Garzik <jgarzik@pobox.com>  2005-08-30 13:32:29 -0400
committer  Jeff Garzik <jgarzik@pobox.com>  2005-08-30 13:32:29 -0400
commit     ed735ccbefaf7e5e3ef61418f7e209b8c59308a7 (patch)
tree       b8cc69814d2368b08d0a84c8da0c12028bd04867 /drivers/net
parent     39fbe47377062200acc26ea0ccef223b4399a82c (diff)
parent     d8971fcb702e24d1e22c77fd1772f182ffee87e3 (diff)
Merge HEAD from /spare/repo/linux-2.6/.git
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bnx2.c               | 229
-rw-r--r--  drivers/net/bnx2.h               |  10
-rw-r--r--  drivers/net/bonding/bond_3ad.c   |  11
-rw-r--r--  drivers/net/bonding/bond_3ad.h   |   2
-rw-r--r--  drivers/net/bonding/bond_alb.c   |   5
-rw-r--r--  drivers/net/hamradio/bpqether.c  |   4
-rw-r--r--  drivers/net/ibmveth.c            |   2
-rw-r--r--  drivers/net/iseries_veth.c       |   2
-rw-r--r--  drivers/net/ppp_generic.c        |   1
-rw-r--r--  drivers/net/pppoe.c              |   6
-rw-r--r--  drivers/net/rrunner.c            |   3
-rw-r--r--  drivers/net/s2io.h               |   4
-rw-r--r--  drivers/net/shaper.c             |  50
-rw-r--r--  drivers/net/tg3.c                | 325
-rw-r--r--  drivers/net/tg3.h                |  10
-rw-r--r--  drivers/net/wan/hdlc_generic.c   |   2
-rw-r--r--  drivers/net/wan/lapbether.c      |   2
-rw-r--r--  drivers/net/wan/sdla_fr.c        |  22
-rw-r--r--  drivers/net/wan/syncppp.c        |   2
19 files changed, 413 insertions, 279 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 8acc655ec1e8..7babf6af4e28 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -14,8 +14,8 @@ #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.2.19" -#define DRV_MODULE_RELDATE "May 23, 2005" +#define DRV_MODULE_VERSION "1.2.20" +#define DRV_MODULE_RELDATE "August 22, 2005" #define RUN_AT(x) (jiffies + (x)) @@ -52,7 +52,6 @@ static struct { { "HP NC370i Multifunction Gigabit Server Adapter" }, { "Broadcom NetXtreme II BCM5706 1000Base-SX" }, { "HP NC370F Multifunction Gigabit Server Adapter" }, - { 0 }, }; static struct pci_device_id bnx2_pci_tbl[] = { @@ -108,6 +107,15 @@ static struct flash_spec flash_table[] = MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); +static inline u32 bnx2_tx_avail(struct bnx2 *bp) +{ + u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons); + + if (diff > MAX_TX_DESC_CNT) + diff = (diff & MAX_TX_DESC_CNT) - 1; + return (bp->tx_ring_size - diff); +} + static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) { @@ -807,7 +815,19 @@ bnx2_setup_serdes_phy(struct bnx2 *bp) bnx2_write_phy(bp, MII_ADVERTISE, new_adv); bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); - bp->serdes_an_pending = SERDES_AN_TIMEOUT / bp->timer_interval; + if (CHIP_NUM(bp) == CHIP_NUM_5706) { + /* Speed up link-up time when the link partner + * does not autonegotiate which is very common + * in blade servers. Some blade servers use + * IPMI for kerboard input and it's important + * to minimize link disruptions. Autoneg. involves + * exchanging base pages plus 3 next pages and + * normally completes in about 120 msec. + */ + bp->current_interval = SERDES_AN_TIMEOUT; + bp->serdes_an_pending = 1; + mod_timer(&bp->timer, jiffies + bp->current_interval); + } } return 0; @@ -1327,22 +1347,17 @@ bnx2_tx_int(struct bnx2 *bp) } } - atomic_add(tx_free_bd, &bp->tx_avail_bd); + bp->tx_cons = sw_cons; if (unlikely(netif_queue_stopped(bp->dev))) { - unsigned long flags; - - spin_lock_irqsave(&bp->tx_lock, flags); + spin_lock(&bp->tx_lock); if ((netif_queue_stopped(bp->dev)) && - (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)) { + (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) { netif_wake_queue(bp->dev); } - spin_unlock_irqrestore(&bp->tx_lock, flags); + spin_unlock(&bp->tx_lock); } - - bp->tx_cons = sw_cons; - } static inline void @@ -1523,15 +1538,12 @@ bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs) BNX2_PCICFG_INT_ACK_CMD_MASK_INT); /* Return here if interrupt is disabled. */ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - return IRQ_RETVAL(1); - } + if (unlikely(atomic_read(&bp->intr_sem) != 0)) + return IRQ_HANDLED; - if (netif_rx_schedule_prep(dev)) { - __netif_rx_schedule(dev); - } + netif_rx_schedule(dev); - return IRQ_RETVAL(1); + return IRQ_HANDLED; } static irqreturn_t @@ -1549,22 +1561,19 @@ bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs) if ((bp->status_blk->status_idx == bp->last_status_idx) || (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) - return IRQ_RETVAL(0); + return IRQ_NONE; REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | BNX2_PCICFG_INT_ACK_CMD_MASK_INT); /* Return here if interrupt is shared and is disabled. 
*/ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - return IRQ_RETVAL(1); - } + if (unlikely(atomic_read(&bp->intr_sem) != 0)) + return IRQ_HANDLED; - if (netif_rx_schedule_prep(dev)) { - __netif_rx_schedule(dev); - } + netif_rx_schedule(dev); - return IRQ_RETVAL(1); + return IRQ_HANDLED; } static int @@ -1581,11 +1590,9 @@ bnx2_poll(struct net_device *dev, int *budget) (bp->status_blk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { - unsigned long flags; - - spin_lock_irqsave(&bp->phy_lock, flags); + spin_lock(&bp->phy_lock); bnx2_phy_int(bp); - spin_unlock_irqrestore(&bp->phy_lock, flags); + spin_unlock(&bp->phy_lock); } if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_cons) { @@ -1628,9 +1635,8 @@ bnx2_set_rx_mode(struct net_device *dev) struct bnx2 *bp = dev->priv; u32 rx_mode, sort_mode; int i; - unsigned long flags; - spin_lock_irqsave(&bp->phy_lock, flags); + spin_lock_bh(&bp->phy_lock); rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); @@ -1691,7 +1697,7 @@ bnx2_set_rx_mode(struct net_device *dev) REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); - spin_unlock_irqrestore(&bp->phy_lock, flags); + spin_unlock_bh(&bp->phy_lock); } static void @@ -2960,7 +2966,6 @@ bnx2_init_tx_ring(struct bnx2 *bp) bp->tx_prod = 0; bp->tx_cons = 0; bp->tx_prod_bseq = 0; - atomic_set(&bp->tx_avail_bd, bp->tx_ring_size); val = BNX2_L2CTX_TYPE_TYPE_L2; val |= BNX2_L2CTX_TYPE_SIZE_L2; @@ -3507,11 +3512,11 @@ bnx2_test_registers(struct bnx2 *bp) rw_mask = reg_tbl[i].rw_mask; ro_mask = reg_tbl[i].ro_mask; - save_val = readl((u8 *) bp->regview + offset); + save_val = readl(bp->regview + offset); - writel(0, (u8 *) bp->regview + offset); + writel(0, bp->regview + offset); - val = readl((u8 *) bp->regview + offset); + val = readl(bp->regview + offset); if ((val & rw_mask) != 0) { goto reg_test_err; } @@ -3520,9 +3525,9 @@ bnx2_test_registers(struct bnx2 *bp) goto reg_test_err; } - writel(0xffffffff, (u8 *) bp->regview + offset); + writel(0xffffffff, bp->regview + offset); - val = readl((u8 *) bp->regview + offset); + val = readl(bp->regview + offset); if ((val & rw_mask) != rw_mask) { goto reg_test_err; } @@ -3531,11 +3536,11 @@ bnx2_test_registers(struct bnx2 *bp) goto reg_test_err; } - writel(save_val, (u8 *) bp->regview + offset); + writel(save_val, bp->regview + offset); continue; reg_test_err: - writel(save_val, (u8 *) bp->regview + offset); + writel(save_val, bp->regview + offset); ret = -ENODEV; break; } @@ -3752,10 +3757,10 @@ bnx2_test_link(struct bnx2 *bp) { u32 bmsr; - spin_lock_irq(&bp->phy_lock); + spin_lock_bh(&bp->phy_lock); bnx2_read_phy(bp, MII_BMSR, &bmsr); bnx2_read_phy(bp, MII_BMSR, &bmsr); - spin_unlock_irq(&bp->phy_lock); + spin_unlock_bh(&bp->phy_lock); if (bmsr & BMSR_LSTATUS) { return 0; @@ -3801,6 +3806,9 @@ bnx2_timer(unsigned long data) struct bnx2 *bp = (struct bnx2 *) data; u32 msg; + if (!netif_running(bp->dev)) + return; + if (atomic_read(&bp->intr_sem) != 0) goto bnx2_restart_timer; @@ -3809,15 +3817,16 @@ bnx2_timer(unsigned long data) if ((bp->phy_flags & PHY_SERDES_FLAG) && (CHIP_NUM(bp) == CHIP_NUM_5706)) { - unsigned long flags; - spin_lock_irqsave(&bp->phy_lock, flags); + spin_lock(&bp->phy_lock); if (bp->serdes_an_pending) { bp->serdes_an_pending--; } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { u32 bmcr; + bp->current_interval = bp->timer_interval; + bnx2_read_phy(bp, MII_BMCR, &bmcr); if (bmcr & BMCR_ANENABLE) { @@ 
-3860,14 +3869,14 @@ bnx2_timer(unsigned long data) } } + else + bp->current_interval = bp->timer_interval; - spin_unlock_irqrestore(&bp->phy_lock, flags); + spin_unlock(&bp->phy_lock); } bnx2_restart_timer: - bp->timer.expires = RUN_AT(bp->timer_interval); - - add_timer(&bp->timer); + mod_timer(&bp->timer, jiffies + bp->current_interval); } /* Called with rtnl_lock */ @@ -3920,12 +3929,7 @@ bnx2_open(struct net_device *dev) return rc; } - init_timer(&bp->timer); - - bp->timer.expires = RUN_AT(bp->timer_interval); - bp->timer.data = (unsigned long) bp; - bp->timer.function = bnx2_timer; - add_timer(&bp->timer); + mod_timer(&bp->timer, jiffies + bp->current_interval); atomic_set(&bp->intr_sem, 0); @@ -3976,12 +3980,17 @@ bnx2_reset_task(void *data) { struct bnx2 *bp = data; + if (!netif_running(bp->dev)) + return; + + bp->in_reset_task = 1; bnx2_netif_stop(bp); bnx2_init_nic(bp); atomic_set(&bp->intr_sem, 1); bnx2_netif_start(bp); + bp->in_reset_task = 0; } static void @@ -4041,9 +4050,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) u16 prod, ring_prod; int i; - if (unlikely(atomic_read(&bp->tx_avail_bd) < - (skb_shinfo(skb)->nr_frags + 1))) { - + if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) { netif_stop_queue(dev); printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n", dev->name); @@ -4140,8 +4147,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) prod = NEXT_TX_BD(prod); bp->tx_prod_bseq += skb->len; - atomic_sub(last_frag + 1, &bp->tx_avail_bd); - REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod); REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq); @@ -4150,17 +4155,13 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) bp->tx_prod = prod; dev->trans_start = jiffies; - if (unlikely(atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS)) { - unsigned long flags; - - spin_lock_irqsave(&bp->tx_lock, flags); - if (atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS) { - netif_stop_queue(dev); - - if (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS) - netif_wake_queue(dev); - } - spin_unlock_irqrestore(&bp->tx_lock, flags); + if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) { + spin_lock(&bp->tx_lock); + netif_stop_queue(dev); + + if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS) + netif_wake_queue(dev); + spin_unlock(&bp->tx_lock); } return NETDEV_TX_OK; @@ -4173,7 +4174,13 @@ bnx2_close(struct net_device *dev) struct bnx2 *bp = dev->priv; u32 reset_code; - flush_scheduled_work(); + /* Calling flush_scheduled_work() may deadlock because + * linkwatch_event() may be on the workqueue and it will try to get + * the rtnl_lock which we are holding. 
+ */ + while (bp->in_reset_task) + msleep(1); + bnx2_netif_stop(bp); del_timer_sync(&bp->timer); if (bp->wol) @@ -4390,11 +4397,11 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) bp->req_line_speed = req_line_speed; bp->req_duplex = req_duplex; - spin_lock_irq(&bp->phy_lock); + spin_lock_bh(&bp->phy_lock); bnx2_setup_phy(bp); - spin_unlock_irq(&bp->phy_lock); + spin_unlock_bh(&bp->phy_lock); return 0; } @@ -4464,19 +4471,20 @@ bnx2_nway_reset(struct net_device *dev) return -EINVAL; } - spin_lock_irq(&bp->phy_lock); + spin_lock_bh(&bp->phy_lock); /* Force a link down visible on the other side */ if (bp->phy_flags & PHY_SERDES_FLAG) { bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK); - spin_unlock_irq(&bp->phy_lock); + spin_unlock_bh(&bp->phy_lock); msleep(20); - spin_lock_irq(&bp->phy_lock); + spin_lock_bh(&bp->phy_lock); if (CHIP_NUM(bp) == CHIP_NUM_5706) { - bp->serdes_an_pending = SERDES_AN_TIMEOUT / - bp->timer_interval; + bp->current_interval = SERDES_AN_TIMEOUT; + bp->serdes_an_pending = 1; + mod_timer(&bp->timer, jiffies + bp->current_interval); } } @@ -4484,7 +4492,7 @@ bnx2_nway_reset(struct net_device *dev) bmcr &= ~BMCR_LOOPBACK; bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); - spin_unlock_irq(&bp->phy_lock); + spin_unlock_bh(&bp->phy_lock); return 0; } @@ -4670,11 +4678,11 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) bp->autoneg &= ~AUTONEG_FLOW_CTRL; } - spin_lock_irq(&bp->phy_lock); + spin_lock_bh(&bp->phy_lock); bnx2_setup_phy(bp); - spin_unlock_irq(&bp->phy_lock); + spin_unlock_bh(&bp->phy_lock); return 0; } @@ -4698,7 +4706,7 @@ bnx2_set_rx_csum(struct net_device *dev, u32 data) #define BNX2_NUM_STATS 45 -struct { +static struct { char string[ETH_GSTRING_LEN]; } bnx2_stats_str_arr[BNX2_NUM_STATS] = { { "rx_bytes" }, @@ -4750,7 +4758,7 @@ struct { #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) -unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { +static unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { STATS_OFFSET32(stat_IfHCInOctets_hi), STATS_OFFSET32(stat_IfHCInBadOctets_hi), STATS_OFFSET32(stat_IfHCOutOctets_hi), @@ -4801,7 +4809,7 @@ unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are * skipped because of errata. 
*/ -u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = { +static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = { 8,0,8,8,8,8,8,8,8,8, 4,0,4,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4,4,4, @@ -4811,7 +4819,7 @@ u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = { #define BNX2_NUM_TESTS 6 -struct { +static struct { char string[ETH_GSTRING_LEN]; } bnx2_tests_str_arr[BNX2_NUM_TESTS] = { { "register_test (offline)" }, @@ -4910,7 +4918,7 @@ bnx2_get_ethtool_stats(struct net_device *dev, struct bnx2 *bp = dev->priv; int i; u32 *hw_stats = (u32 *) bp->stats_blk; - u8 *stats_len_arr = 0; + u8 *stats_len_arr = NULL; if (hw_stats == NULL) { memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); @@ -5012,7 +5020,7 @@ static struct ethtool_ops bnx2_ethtool_ops = { static int bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data; + struct mii_ioctl_data *data = if_mii(ifr); struct bnx2 *bp = dev->priv; int err; @@ -5024,9 +5032,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: { u32 mii_regval; - spin_lock_irq(&bp->phy_lock); + spin_lock_bh(&bp->phy_lock); err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval); - spin_unlock_irq(&bp->phy_lock); + spin_unlock_bh(&bp->phy_lock); data->val_out = mii_regval; @@ -5037,9 +5045,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!capable(CAP_NET_ADMIN)) return -EPERM; - spin_lock_irq(&bp->phy_lock); + spin_lock_bh(&bp->phy_lock); err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in); - spin_unlock_irq(&bp->phy_lock); + spin_unlock_bh(&bp->phy_lock); return err; @@ -5057,6 +5065,9 @@ bnx2_change_mac_addr(struct net_device *dev, void *p) struct sockaddr *addr = p; struct bnx2 *bp = dev->priv; + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); if (netif_running(dev)) bnx2_set_mac_addr(bp); @@ -5305,6 +5316,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->stats_ticks = 1000000 & 0xffff00; bp->timer_interval = HZ; + bp->current_interval = HZ; /* Disable WOL support if we are running on a SERDES chip. 
*/ if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) { @@ -5328,6 +5340,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->req_line_speed = 0; if (bp->phy_flags & PHY_SERDES_FLAG) { bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg; + + reg = REG_RD_IND(bp, HOST_VIEW_SHMEM_BASE + + BNX2_PORT_HW_CFG_CONFIG); + reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK; + if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) { + bp->autoneg = 0; + bp->req_line_speed = bp->line_speed = SPEED_1000; + bp->req_duplex = DUPLEX_FULL; + } } else { bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg; @@ -5335,11 +5356,17 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; + init_timer(&bp->timer); + bp->timer.expires = RUN_AT(bp->timer_interval); + bp->timer.data = (unsigned long) bp; + bp->timer.function = bnx2_timer; + return 0; err_out_unmap: if (bp->regview) { iounmap(bp->regview); + bp->regview = NULL; } err_out_release: @@ -5454,6 +5481,8 @@ bnx2_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct bnx2 *bp = dev->priv; + flush_scheduled_work(); + unregister_netdev(dev); if (bp->regview) @@ -5505,12 +5534,12 @@ bnx2_resume(struct pci_dev *pdev) } static struct pci_driver bnx2_pci_driver = { - name: DRV_MODULE_NAME, - id_table: bnx2_pci_tbl, - probe: bnx2_init_one, - remove: __devexit_p(bnx2_remove_one), - suspend: bnx2_suspend, - resume: bnx2_resume, + .name = DRV_MODULE_NAME, + .id_table = bnx2_pci_tbl, + .probe = bnx2_init_one, + .remove = __devexit_p(bnx2_remove_one), + .suspend = bnx2_suspend, + .resume = bnx2_resume, }; static int __init bnx2_init(void) diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 8214a2853d0d..9ad3f5740cd8 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -3841,12 +3841,12 @@ struct bnx2 { struct status_block *status_blk; u32 last_status_idx; - atomic_t tx_avail_bd; struct tx_bd *tx_desc_ring; struct sw_bd *tx_buf_ring; u32 tx_prod_bseq; u16 tx_prod; u16 tx_cons; + int tx_ring_size; #ifdef BCM_VLAN struct vlan_group *vlgrp; @@ -3872,8 +3872,10 @@ struct bnx2 { char *name; int timer_interval; + int current_interval; struct timer_list timer; struct work_struct reset_task; + int in_reset_task; /* Used to synchronize phy accesses. 
*/ spinlock_t phy_lock; @@ -3927,7 +3929,6 @@ struct bnx2 { u16 fw_wr_seq; u16 fw_drv_pulse_wr_seq; - int tx_ring_size; dma_addr_t tx_desc_mapping; @@ -3985,7 +3986,7 @@ struct bnx2 { #define PHY_LOOPBACK 2 u8 serdes_an_pending; -#define SERDES_AN_TIMEOUT (2 * HZ) +#define SERDES_AN_TIMEOUT (HZ / 3) u8 mac_addr[8]; @@ -4171,6 +4172,9 @@ struct fw_info { #define BNX2_PORT_HW_CFG_MAC_LOWER 0x00000054 #define BNX2_PORT_HW_CFG_CONFIG 0x00000058 +#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK 0x001f0000 +#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_AN 0x00000000 +#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G 0x00030000 #define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER 0x00000068 #define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER 0x0000006c diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a2e8dda5afac..d2f34d5a8083 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2419,22 +2419,19 @@ out: return 0; } -int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype) +int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev) { struct bonding *bond = dev->priv; struct slave *slave = NULL; int ret = NET_RX_DROP; - if (!(dev->flags & IFF_MASTER)) { + if (!(dev->flags & IFF_MASTER)) goto out; - } read_lock(&bond->lock); - slave = bond_get_slave_by_dev((struct bonding *)dev->priv, - skb->real_dev); - if (slave == NULL) { + slave = bond_get_slave_by_dev((struct bonding *)dev->priv, orig_dev); + if (!slave) goto out_unlock; - } bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index f46823894187..673a30af5660 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -295,6 +295,6 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave); void bond_3ad_handle_link_change(struct slave *slave, char link); int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); -int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype); +int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev); #endif //__BOND_3AD_H__ diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 19e829b567d0..f8fce3961197 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -354,15 +354,14 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp) _unlock_rx_hashtbl(bond); } -static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype) +static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype, struct net_device *orig_dev) { struct bonding *bond = bond_dev->priv; struct arp_pkt *arp = (struct arp_pkt *)skb->data; int res = NET_RX_DROP; - if (!(bond_dev->flags & IFF_MASTER)) { + if (!(bond_dev->flags & IFF_MASTER)) goto out; - } if (!arp) { dprintk("Packet has no ARP data\n"); diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index ba9f0580e1f9..2946e037a9b1 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -98,7 +98,7 @@ static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF}; static char bpq_eth_addr[6]; -static int bpq_rcv(struct sk_buff *, struct net_device *, 
struct packet_type *); +static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static int bpq_device_event(struct notifier_block *, unsigned long, void *); static const char *bpq_print_ethaddr(const unsigned char *); @@ -165,7 +165,7 @@ static inline int dev_is_ethdev(struct net_device *dev) /* * Receive an AX.25 frame via an ethernet interface. */ -static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype) +static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { int len; char * ptr; diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index c39b0609742a..32d5fabd4b10 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -1144,7 +1144,7 @@ static void ibmveth_proc_unregister_driver(void) static struct vio_device_id ibmveth_device_table[] __devinitdata= { { "network", "IBM,l-lan"}, - { 0,} + { "", "" } }; MODULE_DEVICE_TABLE(vio, ibmveth_device_table); diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index 55af32e9bf08..183ba97785b0 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c @@ -1370,7 +1370,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id) */ static struct vio_device_id veth_device_table[] __devinitdata = { { "vlan", "" }, - { NULL, NULL } + { "", "" } }; MODULE_DEVICE_TABLE(vio, veth_device_table); diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index a32668e88e09..bb71638a7c44 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -1657,7 +1657,6 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) skb->dev = ppp->dev; skb->protocol = htons(npindex_to_ethertype[npi]); skb->mac.raw = skb->data; - skb->input_dev = ppp->dev; netif_rx(skb); ppp->dev->last_rx = jiffies; } diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index ce1a9bf7b9a7..82f236cc3b9b 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -377,7 +377,8 @@ abort_kfree: ***********************************************************************/ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt) + struct packet_type *pt, + struct net_device *orig_dev) { struct pppoe_hdr *ph; @@ -426,7 +427,8 @@ out: ***********************************************************************/ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt) + struct packet_type *pt, + struct net_device *orig_dev) { struct pppoe_hdr *ph; diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c index 12a86f96d973..ec1a18d189a1 100644 --- a/drivers/net/rrunner.c +++ b/drivers/net/rrunner.c @@ -1429,6 +1429,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct rr_private *rrpriv = netdev_priv(dev); struct rr_regs __iomem *regs = rrpriv->regs; + struct hippi_cb *hcb = (struct hippi_cb *) skb->cb; struct ring_ctrl *txctrl; unsigned long flags; u32 index, len = skb->len; @@ -1460,7 +1461,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev) ifield = (u32 *)skb_push(skb, 8); ifield[0] = 0; - ifield[1] = skb->private.ifield; + ifield[1] = hcb->ifield; /* * We don't need the lock before we are actually going to start diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 5d9270730ca2..bc64d967f080 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h @@ -762,8 +762,8 @@ static inline u64 readq(void __iomem *addr) { u64 ret = 0; ret 
= readl(addr + 4); - (u64) ret <<= 32; - (u64) ret |= readl(addr); + ret <<= 32; + ret |= readl(addr); return ret; } diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c index 3ad0b6751f6f..221354eea21f 100644 --- a/drivers/net/shaper.c +++ b/drivers/net/shaper.c @@ -156,52 +156,6 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb); -#ifdef SHAPER_COMPLEX /* and broken.. */ - - while(ptr && ptr!=(struct sk_buff *)&shaper->sendq) - { - if(ptr->pri<skb->pri - && jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP) - { - struct sk_buff *tmp=ptr->prev; - - /* - * It goes before us therefore we slip the length - * of the new frame. - */ - - SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen; - SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen; - - /* - * The packet may have slipped so far back it - * fell off. - */ - if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY) - { - skb_unlink(ptr); - dev_kfree_skb(ptr); - } - ptr=tmp; - } - else - break; - } - if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq) - skb_queue_head(&shaper->sendq,skb); - else - { - struct sk_buff *tmp; - /* - * Set the packet clock out time according to the - * frames ahead. Im sure a bit of thought could drop - * this loop. - */ - for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next) - SHAPERCB(skb)->shapeclock+=tmp->shapelen; - skb_append(ptr,skb); - } -#else { struct sk_buff *tmp; /* @@ -220,7 +174,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) } else skb_queue_tail(&shaper->sendq, skb); } -#endif + if(sh_debug) printk("Frame queued.\n"); if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN) @@ -302,7 +256,7 @@ static void shaper_kick(struct shaper *shaper) * Pull the frame and get interrupts back on. 
*/ - skb_unlink(skb); + skb_unlink(skb, &shaper->sendq); if (shaper->recovery < SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen) shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 6d4ab1e333b5..af8263a1580e 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -340,41 +340,92 @@ static struct { static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) { - if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { - spin_lock_bh(&tp->indirect_lock); - pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); - pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); - spin_unlock_bh(&tp->indirect_lock); - } else { - writel(val, tp->regs + off); - if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0) - readl(tp->regs + off); + unsigned long flags; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); +} + +static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off); + readl(tp->regs + off); +} + +static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; +} + +static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) +{ + unsigned long flags; + + if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { + pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) { + pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + + /* In indirect mode when disabling interrupts, we also need + * to clear the interrupt bit in the GRC local ctrl register. 
+ */ + if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) && + (val == 0x1)) { + pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, + tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); } } +static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; +} + static void _tw32_flush(struct tg3 *tp, u32 off, u32 val) { - if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { - spin_lock_bh(&tp->indirect_lock); - pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); - pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); - spin_unlock_bh(&tp->indirect_lock); - } else { - void __iomem *dest = tp->regs + off; - writel(val, dest); - readl(dest); /* always flush PCI write */ - } + tp->write32(tp, off, val); + if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) && + !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) && + !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) + tp->read32(tp, off); /* flush */ } -static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val) +static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) { - void __iomem *mbox = tp->regs + off; - writel(val, mbox); - if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) - readl(mbox); + tp->write32_mbox(tp, off, val); + if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) && + !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) + tp->read32_mbox(tp, off); } -static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val) +static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) { void __iomem *mbox = tp->regs + off; writel(val, mbox); @@ -384,46 +435,57 @@ static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val) readl(mbox); } -#define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg)) -#define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val) -#define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val) +static void tg3_write32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off); +} + +static u32 tg3_read32(struct tg3 *tp, u32 off) +{ + return (readl(tp->regs + off)); +} + +#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) +#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) +#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) +#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) +#define tr32_mailbox(reg) tp->read32_mbox(tp, reg) -#define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val)) +#define tw32(reg,val) tp->write32(tp, reg, val) #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val)) -#define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg)) -#define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg)) -#define tr32(reg) readl(tp->regs + (reg)) -#define tr16(reg) readw(tp->regs + (reg)) -#define tr8(reg) readb(tp->regs + (reg)) +#define tr32(reg) tp->read32(tp, reg) static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) { - spin_lock_bh(&tp->indirect_lock); + unsigned long flags; + + spin_lock_irqsave(&tp->indirect_lock, flags); pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); /* Always leave this as zero. 
*/ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); - spin_unlock_bh(&tp->indirect_lock); + spin_unlock_irqrestore(&tp->indirect_lock, flags); } static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) { - spin_lock_bh(&tp->indirect_lock); + unsigned long flags; + + spin_lock_irqsave(&tp->indirect_lock, flags); pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); /* Always leave this as zero. */ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); - spin_unlock_bh(&tp->indirect_lock); + spin_unlock_irqrestore(&tp->indirect_lock, flags); } static void tg3_disable_ints(struct tg3 *tp) { tw32(TG3PCI_MISC_HOST_CTRL, (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); - tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); } static inline void tg3_cond_int(struct tg3 *tp) @@ -439,9 +501,8 @@ static void tg3_enable_ints(struct tg3 *tp) tw32(TG3PCI_MISC_HOST_CTRL, (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - (tp->last_tag << 24)); - tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + (tp->last_tag << 24)); tg3_cond_int(tp); } @@ -472,8 +533,6 @@ static inline unsigned int tg3_has_work(struct tg3 *tp) */ static void tg3_restart_ints(struct tg3 *tp) { - tw32(TG3PCI_MISC_HOST_CTRL, - (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, tp->last_tag << 24); mmiowb(); @@ -3278,9 +3337,8 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs) /* No work, shared interrupt perhaps? re-enable * interrupts, and flush that PCI write */ - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000); - tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); } } else { /* shared interrupt */ handled = 0; @@ -3323,9 +3381,8 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *r /* no work, shared interrupt perhaps? re-enable * interrupts, and flush that PCI write */ - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - tp->last_tag << 24); - tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + tp->last_tag << 24); } } else { /* shared interrupt */ handled = 0; @@ -4216,7 +4273,7 @@ static void tg3_stop_fw(struct tg3 *); static int tg3_chip_reset(struct tg3 *tp) { u32 val; - u32 flags_save; + void (*write_op)(struct tg3 *, u32, u32); int i; if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) @@ -4228,8 +4285,9 @@ static int tg3_chip_reset(struct tg3 *tp) * fun things. So, temporarily disable the 5701 * hardware workaround, while we do the reset. */ - flags_save = tp->tg3_flags; - tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG; + write_op = tp->write32; + if (write_op == tg3_write_flush_reg32) + tp->write32 = tg3_write32; /* do the reset */ val = GRC_MISC_CFG_CORECLK_RESET; @@ -4248,8 +4306,8 @@ static int tg3_chip_reset(struct tg3 *tp) val |= GRC_MISC_CFG_KEEP_GPHY_POWER; tw32(GRC_MISC_CFG, val); - /* restore 5701 hardware bug workaround flag */ - tp->tg3_flags = flags_save; + /* restore 5701 hardware bug workaround write method */ + tp->write32 = write_op; /* Unfortunately, we have to delay before the PCI read back. 
* Some 575X chips even will not respond to a PCI cfg access @@ -4635,7 +4693,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b int cpu_scratch_size, struct fw_info *info) { int err, i; - u32 orig_tg3_flags = tp->tg3_flags; void (*write_op)(struct tg3 *, u32, u32); if (cpu_base == TX_CPU_BASE && @@ -4651,11 +4708,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b else write_op = tg3_write_indirect_reg32; - /* Force use of PCI config space for indirect register - * write calls. - */ - tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; - /* It is possible that bootcode is still loading at this point. * Get the nvram lock first before halting the cpu. */ @@ -4691,7 +4743,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b err = 0; out: - tp->tg3_flags = orig_tg3_flags; return err; } @@ -5808,8 +5859,7 @@ static int tg3_reset_hw(struct tg3 *tp) tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); udelay(100); - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); - tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); tp->last_tag = 0; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { @@ -6198,7 +6248,8 @@ static int tg3_test_interrupt(struct tg3 *tp) HOSTCC_MODE_NOW); for (i = 0; i < 5; i++) { - int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); + int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 + + TG3_64BIT_REG_LOW); if (int_mbox != 0) break; msleep(10); @@ -6598,10 +6649,10 @@ static int tg3_open(struct net_device *dev) /* Mailboxes */ printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n", - tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0), - tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4), - tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0), - tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4)); + tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0), + tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4), + tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0), + tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4)); /* NIC side send descriptors. */ for (i = 0; i < 6; i++) { @@ -7901,7 +7952,7 @@ static int tg3_test_loopback(struct tg3 *tp) num_pkts++; tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx); - tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW); + tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW); udelay(10); @@ -9153,14 +9204,6 @@ static int __devinit tg3_is_sun_570X(struct tg3 *tp) static int __devinit tg3_get_invariants(struct tg3 *tp) { static struct pci_device_id write_reorder_chipsets[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_82801AA_8) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_82801AB_8) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_82801BA_11) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_82801BA_6) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, { }, @@ -9177,7 +9220,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_SUN_570X; #endif - /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write + /* If we have an AMD 762 chipset, write * reordering to the mailbox registers done by the host * controller can cause major troubles. 
We read back from * every mailbox register write to force the writes to be @@ -9215,6 +9258,69 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; + /* If we have 5702/03 A1 or A2 on certain ICH chipsets, + * we need to disable memory and use config. cycles + * only to access all registers. The 5702/03 chips + * can mistakenly decode the special cycles from the + * ICH chipsets as memory write cycles, causing corruption + * of register and memory space. Only certain ICH bridges + * will drive special cycles with non-zero data during the + * address phase which can fall within the 5703's address + * range. This is not an ICH bug as the PCI spec allows + * non-zero address during special cycles. However, only + * these ICH bridges are known to drive non-zero addresses + * during special cycles. + * + * Since special cycles do not cross PCI bridges, we only + * enable this workaround if the 5703 is on the secondary + * bus of these ICH bridges. + */ + if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || + (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { + static struct tg3_dev_id { + u32 vendor; + u32 device; + u32 rev; + } ich_chipsets[] = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, + 0xa }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, + PCI_ANY_ID }, + { }, + }; + struct tg3_dev_id *pci_id = &ich_chipsets[0]; + struct pci_dev *bridge = NULL; + + while (pci_id->vendor != 0) { + bridge = pci_get_device(pci_id->vendor, pci_id->device, + bridge); + if (!bridge) { + pci_id++; + continue; + } + if (pci_id->rev != PCI_ANY_ID) { + u8 rev; + + pci_read_config_byte(bridge, PCI_REVISION_ID, + &rev); + if (rev > pci_id->rev) + continue; + } + if (bridge->subordinate && + (bridge->subordinate->number == + tp->pdev->bus->number)) { + + tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND; + pci_dev_put(bridge); + break; + } + } + } + /* Find msi capability. */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); @@ -9302,6 +9408,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) } } + /* 5700 BX chips need to have their TX producer index mailboxes + * written twice to workaround a bug. + */ + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) + tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG; + /* Back to back register writes can cause problems on this chip, * the workaround is to read back all reg writes except those to * mailbox regs. See tg3_write_indirect_reg32(). 
@@ -9325,6 +9437,43 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); } + /* Default fast path register access methods */ + tp->read32 = tg3_read32; + tp->write32 = tg3_write32; + tp->read32_mbox = tg3_read32; + tp->write32_mbox = tg3_write32; + tp->write32_tx_mbox = tg3_write32; + tp->write32_rx_mbox = tg3_write32; + + /* Various workaround register access methods */ + if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) + tp->write32 = tg3_write_indirect_reg32; + else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) + tp->write32 = tg3_write_flush_reg32; + + if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) || + (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) + tp->write32_rx_mbox = tg3_write_flush_reg32; + } + + if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) { + tp->read32 = tg3_read_indirect_reg32; + tp->write32 = tg3_write_indirect_reg32; + tp->read32_mbox = tg3_read_indirect_mbox; + tp->write32_mbox = tg3_write_indirect_mbox; + tp->write32_tx_mbox = tg3_write_indirect_mbox; + tp->write32_rx_mbox = tg3_write_indirect_mbox; + + iounmap(tp->regs); + tp->regs = 0; + + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_MEMORY; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + } + /* Get eeprom hw config before calling tg3_set_power_state(). * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be * determined before calling tg3_set_power_state() so that @@ -9539,14 +9688,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) else tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; - /* 5700 BX chips need to have their TX producer index mailboxes - * written twice to workaround a bug. - */ - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) - tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG; - else - tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG; - /* It seems all chips can get confused if TX buffers * straddle the 4GB address boundary in some cases. 
*/ @@ -10469,7 +10610,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, return 0; err_out_iounmap: - iounmap(tp->regs); + if (tp->regs) { + iounmap(tp->regs); + tp->regs = 0; + } err_out_free_dev: free_netdev(dev); @@ -10491,7 +10635,10 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev) struct tg3 *tp = netdev_priv(dev); unregister_netdev(dev); - iounmap(tp->regs); + if (tp->regs) { + iounmap(tp->regs); + tp->regs = 0; + } free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 5c4433c147fa..c184b773e585 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -2049,6 +2049,11 @@ struct tg3 { spinlock_t lock; spinlock_t indirect_lock; + u32 (*read32) (struct tg3 *, u32); + void (*write32) (struct tg3 *, u32, u32); + u32 (*read32_mbox) (struct tg3 *, u32); + void (*write32_mbox) (struct tg3 *, u32, + u32); void __iomem *regs; struct net_device *dev; struct pci_dev *pdev; @@ -2060,6 +2065,8 @@ struct tg3 { u32 msg_enable; /* begin "tx thread" cacheline section */ + void (*write32_tx_mbox) (struct tg3 *, u32, + u32); u32 tx_prod; u32 tx_cons; u32 tx_pending; @@ -2071,6 +2078,8 @@ struct tg3 { dma_addr_t tx_desc_mapping; /* begin "rx thread" cacheline section */ + void (*write32_rx_mbox) (struct tg3 *, u32, + u32); u32 rx_rcb_ptr; u32 rx_std_ptr; u32 rx_jumbo_ptr; @@ -2165,6 +2174,7 @@ struct tg3 { #define TG3_FLG2_ANY_SERDES (TG3_FLG2_PHY_SERDES | \ TG3_FLG2_MII_SERDES) #define TG3_FLG2_PARALLEL_DETECT 0x01000000 +#define TG3_FLG2_ICH_WORKAROUND 0x02000000 u32 split_mode_max_reqs; #define SPLIT_MODE_5704_MAX_REQ 3 diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c index a63f6a2cc4f7..cdd4c09c2d90 100644 --- a/drivers/net/wan/hdlc_generic.c +++ b/drivers/net/wan/hdlc_generic.c @@ -61,7 +61,7 @@ static struct net_device_stats *hdlc_get_stats(struct net_device *dev) static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *p) + struct packet_type *p, struct net_device *orig_dev) { hdlc_device *hdlc = dev_to_hdlc(dev); if (hdlc->proto.netif_rx) diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 7f2e3653c5e5..6c302e9dbca2 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -86,7 +86,7 @@ static __inline__ int dev_is_ethdev(struct net_device *dev) /* * Receive a LAPB frame via an ethernet interface. */ -static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype) +static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { int len, err; struct lapbethdev *lapbeth; diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c index c5f5e62aab8b..0497dbdb8631 100644 --- a/drivers/net/wan/sdla_fr.c +++ b/drivers/net/wan/sdla_fr.c @@ -445,7 +445,7 @@ void s508_s514_unlock(sdla_t *card, unsigned long *smp_flags); void s508_s514_lock(sdla_t *card, unsigned long *smp_flags); unsigned short calc_checksum (char *, int); -static int setup_fr_header(struct sk_buff** skb, +static int setup_fr_header(struct sk_buff *skb, struct net_device* dev, char op_mode); @@ -1372,7 +1372,7 @@ static int if_send(struct sk_buff* skb, struct net_device* dev) /* Move the if_header() code to here. 
By inserting frame * relay header in if_header() we would break the * tcpdump and other packet sniffers */ - chan->fr_header_len = setup_fr_header(&skb,dev,chan->common.usedby); + chan->fr_header_len = setup_fr_header(skb,dev,chan->common.usedby); if (chan->fr_header_len < 0 ){ ++chan->ifstats.tx_dropped; ++card->wandev.stats.tx_dropped; @@ -1597,8 +1597,6 @@ static int setup_for_delayed_transmit(struct net_device* dev, return 1; } - skb_unlink(skb); - chan->transmit_length = len; chan->delay_skb = skb; @@ -4871,18 +4869,15 @@ static void unconfig_fr (sdla_t *card) } } -static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev, +static int setup_fr_header(struct sk_buff *skb, struct net_device* dev, char op_mode) { - struct sk_buff *skb = *skb_orig; fr_channel_t *chan=dev->priv; - if (op_mode == WANPIPE){ - + if (op_mode == WANPIPE) { chan->fr_header[0]=Q922_UI; switch (htons(skb->protocol)){ - case ETH_P_IP: chan->fr_header[1]=NLPID_IP; break; @@ -4894,16 +4889,14 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev, } /* If we are in bridging mode, we must apply - * an Ethernet header */ - if (op_mode == BRIDGE || op_mode == BRIDGE_NODE){ - - + * an Ethernet header + */ + if (op_mode == BRIDGE || op_mode == BRIDGE_NODE) { /* Encapsulate the packet as a bridged Ethernet frame. */ #ifdef DEBUG printk(KERN_INFO "%s: encapsulating skb for frame relay\n", dev->name); #endif - chan->fr_header[0] = 0x03; chan->fr_header[1] = 0x00; chan->fr_header[2] = 0x80; @@ -4916,7 +4909,6 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev, /* Yuck. */ skb->protocol = ETH_P_802_3; return 8; - } return 0; diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c index 84b65c60c799..f58c794a963a 100644 --- a/drivers/net/wan/syncppp.c +++ b/drivers/net/wan/syncppp.c @@ -1447,7 +1447,7 @@ static void sppp_print_bytes (u_char *p, u16 len) * after interrupt servicing to process frames queued via netif_rx. */ -static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p) +static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev) { if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; |
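For reference, the core of the bnx2 change in this merge is replacing the atomic tx_avail_bd counter with the on-the-fly ring computation in bnx2_tx_avail() shown above. The following is a standalone sketch of that arithmetic only; the ring size, the MAX_TX_DESC_CNT mask, and the demo values in main() are illustrative assumptions, not values taken from the driver.

```c
/*
 * Sketch of the TX-availability arithmetic used by bnx2_tx_avail().
 * TX_DESC_CNT and the counters passed in main() are assumed values
 * for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

#define TX_DESC_CNT     256u                  /* assumed ring size (power of two) */
#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1u)    /* wrap mask / last ring index      */

/* Producer and consumer are free-running counters; masking them into the
 * ring gives the number of descriptors currently in flight, and the free
 * count is the ring size minus that difference. */
static uint32_t tx_avail(uint16_t prod, uint16_t cons, uint32_t ring_size)
{
    uint32_t diff = (prod & MAX_TX_DESC_CNT) - (cons & MAX_TX_DESC_CNT);

    if (diff > MAX_TX_DESC_CNT)               /* unsigned subtraction wrapped */
        diff = (diff & MAX_TX_DESC_CNT) - 1;
    return ring_size - diff;
}

int main(void)
{
    /* consumer index has wrapped past the producer index within the ring */
    printf("%u descriptors free\n", tx_avail(5, 250, TX_DESC_CNT));
    printf("%u descriptors free\n", tx_avail(100, 40, TX_DESC_CNT));
    return 0;
}
```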