From 687b13e98addc99644002703944ec89e94287cb6 Mon Sep 17 00:00:00 2001
From: Daniel Martensson
Date: Thu, 13 Oct 2011 11:29:25 +0000
Subject: caif-hsi: Making read and writes asynchronous.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Some platforms do not allow the HSI block to be put into low-power mode
while the FIFO is not empty. This patch flushes the FIFO (by reading it)
during the wake-down sequence. Asynchronous read and write are
implemented for that purpose. As a side effect, this also greatly
improves performance.

Signed-off-by: Sjur Brændeland
Signed-off-by: David S. Miller
---
 drivers/net/caif/caif_hsi.c | 263 +++++++++++++++++++++-----------------------
 1 file changed, 127 insertions(+), 136 deletions(-)

(limited to 'drivers/net/caif/caif_hsi.c')

diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 82c4d6ca2d3f..478b025c9f8b 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -145,7 +145,7 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
 		}
 
 		ret = 5 * HZ;
-		wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
+		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
 
 		if (ret < 0) {
@@ -272,16 +272,13 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 	return CFHSI_DESC_SZ + pld_len;
 }
 
-static void cfhsi_tx_done_work(struct work_struct *work)
+static void cfhsi_tx_done(struct cfhsi *cfhsi)
 {
-	struct cfhsi *cfhsi = NULL;
 	struct cfhsi_desc *desc = NULL;
 	int len = 0;
 	int res;
 
-	cfhsi = container_of(work, struct cfhsi, tx_done_work);
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
-		__func__);
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		return;
@@ -343,11 +340,11 @@ static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		return;
-
-	queue_work(cfhsi->wq, &cfhsi->tx_done_work);
+	cfhsi_tx_done(cfhsi);
 }
 
-static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
+static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi,
+			 bool *dump)
 {
 	int xfer_sz = 0;
 	int nfrms = 0;
@@ -358,6 +355,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 		(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
 		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
 			__func__);
+		*dump = true;
 		return 0;
 	}
 
@@ -365,7 +363,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 	if (desc->offset) {
 		struct sk_buff *skb;
 		u8 *dst = NULL;
-		int len = 0, retries = 0;
+		int len = 0;
 		pfrm = ((u8 *)desc) + desc->offset;
 
 		/* Remove offset padding. */
@@ -378,24 +376,11 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 
 		/* Allocate SKB (OK even in IRQ context). */
-		skb = alloc_skb(len + 1, GFP_KERNEL);
-		while (!skb) {
-			retries++;
-			schedule_timeout(1);
-			skb = alloc_skb(len + 1, GFP_KERNEL);
-			if (skb) {
-				printk(KERN_WARNING "%s: slept for %u "
-					"before getting memory\n",
-					__func__, retries);
-				break;
-			}
-			if (retries > HZ) {
-				printk(KERN_ERR "%s: slept for 1HZ and "
-					"did not get memory\n",
-					__func__);
-				cfhsi->ndev->stats.rx_dropped++;
-				goto drop_frame;
-			}
+		skb = alloc_skb(len + 1, GFP_ATOMIC);
+		if (!skb) {
+			dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+				__func__);
+			return -ENOMEM;
 		}
 		caif_assert(skb != NULL);
@@ -421,7 +406,6 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 		cfhsi->ndev->stats.rx_bytes += len;
 	}
 
-drop_frame:
 	/* Calculate transfer length. */
 	plen = desc->cffrm_len;
 	while (nfrms < CFHSI_MAX_PKTS && *plen) {
@@ -439,12 +423,13 @@ drop_frame:
 			"%s: Invalid payload len: %d, ignored.\n",
 			__func__, xfer_sz);
 		xfer_sz = 0;
+		*dump = true;
 	}
-
 	return xfer_sz;
 }
 
-static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
+static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi,
+			bool *dump)
 {
 	int rx_sz = 0;
 	int nfrms = 0;
@@ -456,21 +441,33 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 		(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
 		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
 			__func__);
+		*dump = true;
 		return -EINVAL;
 	}
 
 	/* Set frame pointer to start of payload. */
 	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 	plen = desc->cffrm_len;
+
+	/* Skip already processed frames. */
+	while (nfrms < cfhsi->rx_state.nfrms) {
+		pfrm += *plen;
+		rx_sz += *plen;
+		plen++;
+		nfrms++;
+	}
+
+	/* Parse payload. */
 	while (nfrms < CFHSI_MAX_PKTS && *plen) {
 		struct sk_buff *skb;
 		u8 *dst = NULL;
 		u8 *pcffrm = NULL;
-		int len = 0, retries = 0;
+		int len = 0;
 
 		if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
 			dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
 				__func__);
+			*dump = true;
 			return -EINVAL;
 		}
@@ -483,24 +480,12 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 		len += 2;	/* Add FCS fields. */
 
 		/* Allocate SKB (OK even in IRQ context). */
-		skb = alloc_skb(len + 1, GFP_KERNEL);
-		while (!skb) {
-			retries++;
-			schedule_timeout(1);
-			skb = alloc_skb(len + 1, GFP_KERNEL);
-			if (skb) {
-				printk(KERN_WARNING "%s: slept for %u "
-					"before getting memory\n",
-					__func__, retries);
-				break;
-			}
-			if (retries > HZ) {
-				printk(KERN_ERR "%s: slept for 1HZ "
-					"and did not get memory\n",
-					__func__);
-				cfhsi->ndev->stats.rx_dropped++;
-				goto drop_frame;
-			}
+		skb = alloc_skb(len + 1, GFP_ATOMIC);
+		if (!skb) {
+			dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+				__func__);
+			cfhsi->rx_state.nfrms = nfrms;
+			return -ENOMEM;
 		}
 		caif_assert(skb != NULL);
@@ -524,7 +509,6 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 		cfhsi->ndev->stats.rx_packets++;
 		cfhsi->ndev->stats.rx_bytes += len;
 
-drop_frame:
 		pfrm += *plen;
 		rx_sz += *plen;
 		plen++;
@@ -534,18 +518,16 @@ drop_frame:
 	return rx_sz;
 }
 
-static void cfhsi_rx_done_work(struct work_struct *work)
+static void cfhsi_rx_done(struct cfhsi *cfhsi)
 {
 	int res;
 	int desc_pld_len = 0;
-	struct cfhsi *cfhsi = NULL;
 	struct cfhsi_desc *desc = NULL;
+	bool dump = false;
 
-	cfhsi = container_of(work, struct cfhsi, rx_done_work);
 	desc = (struct cfhsi_desc *)cfhsi->rx_buf;
 
-	dev_dbg(&cfhsi->ndev->dev, "%s: Kick timer if pending.\n",
-		__func__);
+	dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		return;
@@ -555,21 +537,33 @@ static void cfhsi_rx_done_work(struct work_struct *work)
 	mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
 	spin_unlock_bh(&cfhsi->lock);
 
-	if (cfhsi->rx_state == CFHSI_RX_STATE_DESC) {
-		desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
+	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
+		desc_pld_len = cfhsi_rx_desc(desc, cfhsi, &dump);
+		if (desc_pld_len == -ENOMEM)
+			goto restart;
 	} else {
 		int pld_len;
 
-		pld_len = cfhsi_rx_pld(desc, cfhsi);
+		if (!cfhsi->rx_state.piggy_desc) {
+			pld_len = cfhsi_rx_pld(desc, cfhsi, &dump);
+			if (pld_len == -ENOMEM)
+				goto restart;
+			cfhsi->rx_state.pld_len = pld_len;
+		} else {
+			pld_len = cfhsi->rx_state.pld_len;
+		}
 
 		if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
 			struct cfhsi_desc *piggy_desc;
 
 			piggy_desc = (struct cfhsi_desc *)
 				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ + pld_len);
+			cfhsi->rx_state.piggy_desc = true;
 
 			/* Extract piggy-backed descriptor. */
-			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);
+			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi, &dump);
+			if (desc_pld_len == -ENOMEM)
+				goto restart;
 
 			/*
 			 * Copy needed information from the piggy-backed
@@ -580,16 +574,24 @@ static void cfhsi_rx_done_work(struct work_struct *work)
 		}
 	}
 
+	if (unlikely(dump)) {
+		size_t rx_offset = cfhsi->rx_ptr - cfhsi->rx_buf;
+		dev_err(&cfhsi->ndev->dev, "%s: RX offset: %u.\n",
+			__func__, (unsigned) rx_offset);
+		print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
+				cfhsi->rx_buf, cfhsi->rx_len + rx_offset);
+	}
+
+	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
 	if (desc_pld_len) {
-		cfhsi->rx_state = CFHSI_RX_STATE_PAYLOAD;
+		cfhsi->rx_state.state = CFHSI_RX_STATE_PAYLOAD;
 		cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
 		cfhsi->rx_len = desc_pld_len;
 	} else {
-		cfhsi->rx_state = CFHSI_RX_STATE_DESC;
+		cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
 		cfhsi->rx_ptr = cfhsi->rx_buf;
 		cfhsi->rx_len = CFHSI_DESC_SZ;
 	}
-	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
 
 	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
 		/* Set up new transfer. */
@@ -604,6 +606,26 @@ static void cfhsi_rx_done_work(struct work_struct *work)
 			cfhsi->ndev->stats.rx_dropped++;
 		}
 	}
+	return;
+
+restart:
+	if (++cfhsi->rx_state.retries > CFHSI_MAX_RX_RETRIES) {
+		dev_err(&cfhsi->ndev->dev, "%s: No memory available "
+			"in %d iterations.\n",
+			__func__, CFHSI_MAX_RX_RETRIES);
+		BUG();
+	}
+	mod_timer(&cfhsi->rx_slowpath_timer, jiffies + 1);
+}
+
+static void cfhsi_rx_slowpath(unsigned long arg)
+{
+	struct cfhsi *cfhsi = (struct cfhsi *)arg;
+
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+		__func__);
+
+	cfhsi_rx_done(cfhsi);
 }
 
 static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
@@ -617,12 +639,10 @@ static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		return;
 
-	set_bit(CFHSI_PENDING_RX, &cfhsi->bits);
-
 	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
 		wake_up_interruptible(&cfhsi->flush_fifo_wait);
 	else
-		queue_work(cfhsi->wq, &cfhsi->rx_done_work);
+		cfhsi_rx_done(cfhsi);
 }
 
 static void cfhsi_wake_up(struct work_struct *work)
@@ -651,9 +671,9 @@ static void cfhsi_wake_up(struct work_struct *work)
 			__func__);
 
 	/* Wait for acknowledge. */
-	ret = CFHSI_WAKEUP_TOUT;
-	wait_event_interruptible_timeout(cfhsi->wake_up_wait,
-					test_bit(CFHSI_WAKE_UP_ACK,
+	ret = CFHSI_WAKE_TOUT;
+	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
+					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
 						 &cfhsi->bits), ret);
 	if (unlikely(ret < 0)) {
 		/* Interrupted by signal. */
@@ -678,16 +698,11 @@ static void cfhsi_wake_up(struct work_struct *work)
 	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 
 	/* Resume read operation. */
-	if (!test_bit(CFHSI_PENDING_RX, &cfhsi->bits)) {
-		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
-			__func__);
-		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr,
-				cfhsi->rx_len, cfhsi->dev);
-		if (WARN_ON(res < 0)) {
-			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
-				__func__, res);
-		}
-	}
+	dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
+	res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);
+
+	if (WARN_ON(res < 0))
+		dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);
 
 	/* Clear power up acknowledment. */
 	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -726,50 +741,29 @@ static void cfhsi_wake_up(struct work_struct *work)
 			"%s: Failed to create HSI frame: %d.\n",
 			__func__, len);
 	}
-
 }
 
 static void cfhsi_wake_down(struct work_struct *work)
 {
 	long ret;
 	struct cfhsi *cfhsi = NULL;
-	size_t fifo_occupancy;
+	size_t fifo_occupancy = 0;
+	int retry = CFHSI_WAKE_TOUT;
 
 	cfhsi = container_of(work, struct cfhsi, wake_down_work);
-	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
-		__func__);
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		return;
 
-	/* Check if there is something in FIFO. */
-	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
-							&fifo_occupancy)))
-		fifo_occupancy = 0;
-
-	if (fifo_occupancy) {
-		dev_dbg(&cfhsi->ndev->dev,
-			"%s: %u words in RX FIFO, restart timer.\n",
-			__func__, (unsigned) fifo_occupancy);
-		spin_lock_bh(&cfhsi->lock);
-		mod_timer(&cfhsi->timer,
-				jiffies + CFHSI_INACTIVITY_TOUT);
-		spin_unlock_bh(&cfhsi->lock);
-		return;
-	}
-
-	/* Cancel pending RX requests */
-	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
-
 	/* Deactivate wake line. */
 	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
 
 	/* Wait for acknowledge. */
-	ret = CFHSI_WAKEUP_TOUT;
+	ret = CFHSI_WAKE_TOUT;
 	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
-					test_bit(CFHSI_WAKE_DOWN_ACK,
-						&cfhsi->bits),
-					ret);
+					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
+							&cfhsi->bits), ret);
 	if (ret < 0) {
 		/* Interrupted by signal. */
 		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
@@ -777,28 +771,31 @@ static void cfhsi_wake_down(struct work_struct *work)
 		return;
 	} else if (!ret) {
 		/* Timeout */
-		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
-			__func__);
+		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);
 	}
 
-	/* Clear power down acknowledment. */
-	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
+	/* Check FIFO occupancy. */
+	while (retry) {
+		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+							&fifo_occupancy));
+
+		if (!fifo_occupancy)
+			break;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(1);
+		retry--;
+	}
+
+	if (!retry)
+		dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);
+
+	/* Clear AWAKE condition. */
 	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
 
-	/* Check if there is something in FIFO. */
-	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
-							&fifo_occupancy)))
-		fifo_occupancy = 0;
+	/* Cancel pending RX requests. */
+	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
 
-	if (fifo_occupancy) {
-		dev_dbg(&cfhsi->ndev->dev,
-			"%s: %u words in RX FIFO, wakeup forced.\n",
-			__func__, (unsigned) fifo_occupancy);
-		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
-			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
-	} else
-		dev_dbg(&cfhsi->ndev->dev, "%s: Done.\n",
-			__func__);
 }
 
 static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
@@ -874,11 +871,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Delete inactivity timer if started. */
-#ifdef CONFIG_SMP
 	timer_active = del_timer_sync(&cfhsi->timer);
-#else
-	timer_active = del_timer(&cfhsi->timer);
-#endif /* CONFIG_SMP */
 
 	spin_unlock_bh(&cfhsi->lock);
@@ -962,7 +955,7 @@ int cfhsi_probe(struct platform_device *pdev)
 
 	/* Initialize state vaiables. */
 	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
-	cfhsi->rx_state = CFHSI_RX_STATE_DESC;
+	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
 
 	/* Set flow info */
 	cfhsi->flow_off_sent = 0;
@@ -1012,15 +1005,12 @@ int cfhsi_probe(struct platform_device *pdev)
 	/* Initialize the work queues. */
 	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
 	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
-	INIT_WORK(&cfhsi->rx_done_work, cfhsi_rx_done_work);
-	INIT_WORK(&cfhsi->tx_done_work, cfhsi_tx_done_work);
 
 	/* Clear all bit fields. */
 	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
 	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
 	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
 	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
-	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
 
 	/* Create work thread. */
 	cfhsi->wq = create_singlethread_workqueue(pdev->name);
@@ -1040,6 +1030,10 @@ int cfhsi_probe(struct platform_device *pdev)
 	init_timer(&cfhsi->timer);
 	cfhsi->timer.data = (unsigned long)cfhsi;
 	cfhsi->timer.function = cfhsi_inactivity_tout;
+	/* Setup the slowpath RX timer. */
+	init_timer(&cfhsi->rx_slowpath_timer);
+	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
+	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
 
 	/* Add CAIF HSI device to list. */
 	spin_lock(&cfhsi_list_lock);
@@ -1110,12 +1104,9 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
 	/* Flush workqueue */
 	flush_workqueue(cfhsi->wq);
 
-	/* Delete timer if pending */
-#ifdef CONFIG_SMP
+	/* Delete timers if pending */
 	del_timer_sync(&cfhsi->timer);
-#else
-	del_timer(&cfhsi->timer);
-#endif /* CONFIG_SMP */
+	del_timer_sync(&cfhsi->rx_slowpath_timer);
 
 	/* Cancel pending RX request (if any) */
 	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
--
cgit v1.2.3
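
For illustration only, a minimal stand-alone sketch of the wake-down pattern the commit message describes: drain the RX FIFO with a bounded retry loop before allowing the link into low power. The names fifo_occupancy() and the retry budget below are simplified stand-ins for the driver's cfhsi_fifo_occupancy callback and CFHSI_WAKE_TOUT; this is not code from the patch itself.

/* fifo_drain_sketch.c - simplified user-space illustration, not driver code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for cfhsi->dev->cfhsi_fifo_occupancy(). */
static size_t fifo_occupancy(void)
{
	static size_t words = 3;	/* pretend three words are still queued */
	return words ? words-- : 0;
}

/* Poll until the FIFO is empty or the retry budget runs out. */
static bool wait_fifo_empty(int retries)
{
	while (retries--) {
		if (fifo_occupancy() == 0)
			return true;
		usleep(1000);	/* the driver instead sleeps one jiffy via schedule_timeout(1) */
	}
	return false;
}

int main(void)
{
	if (wait_fifo_empty(10))
		printf("FIFO empty, safe to enter low-power mode\n");
	else
		fprintf(stderr, "FIFO timeout, reported as an error\n");
	return 0;
}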