Diffstat (limited to 'drivers/bus/mhi/core/main.c')
-rw-r--r-- | drivers/bus/mhi/core/main.c | 416 |
1 file changed, 288 insertions, 128 deletions
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 4e0131b94056..22acde118bc3 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -4,6 +4,7 @@
  *
  */
 
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
@@ -37,6 +38,28 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
 	return 0;
 }
 
+int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
+				    void __iomem *base, u32 offset,
+				    u32 mask, u32 shift, u32 val, u32 delayus)
+{
+	int ret;
+	u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+
+	while (retry--) {
+		ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, shift,
+					 &out);
+		if (ret)
+			return ret;
+
+		if (out == val)
+			return 0;
+
+		fsleep(delayus);
+	}
+
+	return -ETIMEDOUT;
+}
+
 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
 		   u32 offset, u32 val)
 {
@@ -242,10 +265,17 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
 	smp_wmb();
 }
 
+static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
+{
+	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
+}
+
 int mhi_destroy_device(struct device *dev, void *data)
 {
+	struct mhi_chan *ul_chan, *dl_chan;
 	struct mhi_device *mhi_dev;
 	struct mhi_controller *mhi_cntrl;
+	enum mhi_ee_type ee = MHI_EE_MAX;
 
 	if (dev->bus != &mhi_bus_type)
 		return 0;
@@ -257,6 +287,17 @@ int mhi_destroy_device(struct device *dev, void *data)
 	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
 		return 0;
 
+	ul_chan = mhi_dev->ul_chan;
+	dl_chan = mhi_dev->dl_chan;
+
+	/*
+	 * If execution environment is specified, remove only those devices that
+	 * started in them based on ee_mask for the channels as we move on to a
+	 * different execution environment
+	 */
+	if (data)
+		ee = *(enum mhi_ee_type *)data;
+
 	/*
 	 * For the suspend and resume case, this function will get called
 	 * without mhi_unregister_controller(). Hence, we need to drop the
@@ -264,11 +305,19 @@ int mhi_destroy_device(struct device *dev, void *data)
 	 * be sure that there will be no instances of mhi_dev left after
 	 * this.
 	 */
-	if (mhi_dev->ul_chan)
-		put_device(&mhi_dev->ul_chan->mhi_dev->dev);
+	if (ul_chan) {
+		if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
+			return 0;
 
-	if (mhi_dev->dl_chan)
-		put_device(&mhi_dev->dl_chan->mhi_dev->dev);
+		put_device(&ul_chan->mhi_dev->dev);
+	}
+
+	if (dl_chan) {
+		if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
+			return 0;
+
+		put_device(&dl_chan->mhi_dev->dev);
+	}
 
 	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
 		mhi_dev->name);
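The new mhi_poll_reg_field() helper re-reads a register field every delayus microseconds until it matches val or mhi_cntrl->timeout_ms expires, and is_valid_ring_ptr() bounds-checks a device-supplied ring pointer against the ring's IOMMU window before it is ever translated. A caller waiting on a device-ready bit might use the helper as in this sketch (the MHISTATUS register and READY mask/shift names are illustrative assumptions, not part of this diff):

	/* Sketch: poll MHISTATUS.READY every 25 ms until it reads 1, bounded
	 * by mhi_cntrl->timeout_ms; returns -ETIMEDOUT if it never does.
	 */
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				 MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT,
				 1, 25000);
	if (ret)
		dev_err(dev, "Device did not become ready: %d\n", ret);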
@@ -383,7 +432,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
 	struct mhi_event_ctxt *er_ctxt =
 		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
 	struct mhi_ring *ev_ring = &mhi_event->ring;
-	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	dma_addr_t ptr = er_ctxt->rp;
+	void *dev_rp;
+
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		return IRQ_HANDLED;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
 
 	/* Only proceed if event ring has pending events */
 	if (ev_ring->rp == dev_rp)
@@ -407,9 +465,9 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
 {
 	struct mhi_controller *mhi_cntrl = priv;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-	enum mhi_state state = MHI_STATE_MAX;
+	enum mhi_state state;
 	enum mhi_pm_state pm_state = 0;
-	enum mhi_ee_type ee = 0;
+	enum mhi_ee_type ee;
 
 	write_lock_irq(&mhi_cntrl->pm_lock);
 	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -418,11 +476,11 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
 	}
 
 	state = mhi_get_mhi_state(mhi_cntrl);
-	ee = mhi_cntrl->ee;
-	mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
-	dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
-		TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
-		TO_MHI_STATE_STR(state));
+	ee = mhi_get_exec_env(mhi_cntrl);
+	dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
+		TO_MHI_EXEC_STR(mhi_cntrl->ee),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state));
 
 	if (state == MHI_STATE_SYS_ERR) {
 		dev_dbg(dev, "System error detected\n");
@@ -431,27 +489,30 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
 	}
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
-	/* If device supports RDDM don't bother processing SYS error */
-	if (mhi_cntrl->rddm_image) {
-		/* host may be performing a device power down already */
-		if (!mhi_is_active(mhi_cntrl))
-			goto exit_intvec;
+	if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
+		goto exit_intvec;
 
-		if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
+	switch (ee) {
+	case MHI_EE_RDDM:
+		/* proceed if power down is not already in progress */
+		if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
 			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+			mhi_cntrl->ee = ee;
 			wake_up_all(&mhi_cntrl->state_event);
 		}
-		goto exit_intvec;
-	}
-
-	if (pm_state == MHI_PM_SYS_ERR_DETECT) {
+		break;
+	case MHI_EE_PBL:
+	case MHI_EE_EDL:
+	case MHI_EE_PTHRU:
+		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+		mhi_cntrl->ee = ee;
 		wake_up_all(&mhi_cntrl->state_event);
-
-		/* For fatal errors, we let controller decide next step */
-		if (MHI_IN_PBL(ee))
-			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
-		else
-			mhi_pm_sys_err_handler(mhi_cntrl);
+		mhi_pm_sys_err_handler(mhi_cntrl);
+		break;
+	default:
+		wake_up_all(&mhi_cntrl->state_event);
+		mhi_pm_sys_err_handler(mhi_cntrl);
+		break;
 	}
 
 exit_intvec:
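The threaded handler now bails out unless a SYS_ERR transition was latched and the execution environment actually changed, then dispatches on the new EE: RDDM is reported to the controller for dump collection, while a fall-back into PBL/EDL/PTHRU is treated as fatal. A controller driver observes this only through its status_cb; a minimal sketch of such a callback (the function body is illustrative, not from this diff):

	static void my_status_cb(struct mhi_controller *mhi_cntrl,
				 enum mhi_callback cb)
	{
		switch (cb) {
		case MHI_CB_EE_RDDM:
			/* device entered RAM dump mode: collect the dump */
			break;
		case MHI_CB_FATAL_ERROR:
			/* device fell back to PBL/EDL: full reset required */
			break;
		default:
			break;
		}
	}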
@@ -536,6 +597,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 		struct mhi_buf_info *buf_info;
 		u16 xfer_len;
 
+		if (!is_valid_ring_ptr(tre_ring, ptr)) {
+			dev_err(&mhi_cntrl->mhi_dev->dev,
+				"Event element points outside of the tre ring\n");
+			break;
+		}
 		/* Get the TRB this event points to */
 		ev_tre = mhi_to_virtual(tre_ring, ptr);
 
@@ -570,8 +636,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 		/* notify client */
 		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 
-		if (mhi_chan->dir == DMA_TO_DEVICE)
+		if (mhi_chan->dir == DMA_TO_DEVICE) {
 			atomic_dec(&mhi_cntrl->pending_pkts);
+			/* Release the reference got from mhi_queue() */
+			mhi_cntrl->runtime_put(mhi_cntrl);
+		}
 
 		/*
 		 * Recycle the buffer if buffer is pre-allocated,
@@ -595,15 +664,15 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 	case MHI_EV_CC_OOB:
 	case MHI_EV_CC_DB_MODE:
 	{
-		unsigned long flags;
+		unsigned long pm_lock_flags;
 
 		mhi_chan->db_cfg.db_mode = 1;
-		read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+		read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
 		if (tre_ring->wp != tre_ring->rp &&
 		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
 			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 		}
-		read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+		read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
 		break;
 	}
 	case MHI_EV_CC_BAD_TRE:
@@ -695,6 +764,12 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
 	struct mhi_chan *mhi_chan;
 	u32 chan;
 
+	if (!is_valid_ring_ptr(mhi_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event element points outside of the cmd ring\n");
+		return;
+	}
+
 	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
 
 	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
@@ -719,6 +794,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	u32 chan;
 	int count = 0;
+	dma_addr_t ptr = er_ctxt->rp;
 
 	/*
 	 * This is a quick check to avoid unnecessary event processing
@@ -728,7 +804,13 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
 		return -EIO;
 
-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
 	local_rp = ev_ring->rp;
 
 	while (dev_rp != local_rp) {
@@ -771,14 +853,14 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 			break;
 		case MHI_STATE_SYS_ERR:
 		{
-			enum mhi_pm_state new_state;
+			enum mhi_pm_state pm_state;
 
 			dev_dbg(dev, "System error detected\n");
 			write_lock_irq(&mhi_cntrl->pm_lock);
-			new_state = mhi_tryset_pm_state(mhi_cntrl,
+			pm_state = mhi_tryset_pm_state(mhi_cntrl,
 							MHI_PM_SYS_ERR_DETECT);
 			write_unlock_irq(&mhi_cntrl->pm_lock);
-			if (new_state == MHI_PM_SYS_ERR_DETECT)
+			if (pm_state == MHI_PM_SYS_ERR_DETECT)
 				mhi_pm_sys_err_handler(mhi_cntrl);
 			break;
 		}
@@ -807,6 +889,9 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 			case MHI_EE_AMSS:
 				st = DEV_ST_TRANSITION_MISSION_MODE;
 				break;
+			case MHI_EE_FP:
+				st = DEV_ST_TRANSITION_FP;
+				break;
 			case MHI_EE_RDDM:
 				mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
 				write_lock_irq(&mhi_cntrl->pm_lock);
@@ -834,6 +919,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 			 */
 			if (chan < mhi_cntrl->max_chan) {
 				mhi_chan = &mhi_cntrl->mhi_chan[chan];
+				if (!mhi_chan->configured)
+					break;
 				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
 				event_quota--;
 			}
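Each of the checks added above guards a mhi_to_virtual() translation of a pointer read back from the device. The guard is exactly the precondition of that translation, which recovers a host virtual address by plain offset arithmetic; roughly (assumed shape of the helper, shown here for context only):

	/* Assumed shape of mhi_to_virtual(): addr must lie inside
	 * [iommu_base, iommu_base + len), otherwise the result points
	 * outside the ring allocation.
	 */
	static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
	{
		return (addr - ring->iommu_base) + ring->base;
	}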
@@ -845,7 +932,15 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 
 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
 		local_rp = ev_ring->rp;
-		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+		ptr = er_ctxt->rp;
+		if (!is_valid_ring_ptr(ev_ring, ptr)) {
+			dev_err(&mhi_cntrl->mhi_dev->dev,
+				"Event ring rp points outside of the event ring\n");
+			return -EIO;
+		}
+
+		dev_rp = mhi_to_virtual(ev_ring, ptr);
 		count++;
 	}
 
@@ -868,11 +963,18 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 	int count = 0;
 	u32 chan;
 	struct mhi_chan *mhi_chan;
+	dma_addr_t ptr = er_ctxt->rp;
 
 	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
 		return -EIO;
 
-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
 	local_rp = ev_ring->rp;
 
 	while (dev_rp != local_rp && event_quota > 0) {
@@ -886,7 +988,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 		 * Only process the event ring elements whose channel
 		 * ID is within the maximum supported range.
 		 */
-		if (chan < mhi_cntrl->max_chan) {
+		if (chan < mhi_cntrl->max_chan &&
+		    mhi_cntrl->mhi_chan[chan].configured) {
 			mhi_chan = &mhi_cntrl->mhi_chan[chan];
 
 			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
@@ -900,7 +1003,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 
 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
 		local_rp = ev_ring->rp;
-		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+		ptr = er_ctxt->rp;
+		if (!is_valid_ring_ptr(ev_ring, ptr)) {
+			dev_err(&mhi_cntrl->mhi_dev->dev,
+				"Event ring rp points outside of the event ring\n");
+			return -EIO;
+		}
+
+		dev_rp = mhi_to_virtual(ev_ring, ptr);
 		count++;
 	}
 	read_lock_bh(&mhi_cntrl->pm_lock);
@@ -996,7 +1107,7 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 
 	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
 	if (unlikely(ret)) {
-		ret = -ENOMEM;
+		ret = -EAGAIN;
 		goto exit_unlock;
 	}
 
@@ -1004,9 +1115,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (unlikely(ret))
 		goto exit_unlock;
 
-	/* trigger M3 exit if necessary */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
+	/* Packet is queued, take a usage ref to exit M3 if necessary
+	 * for host->device buffer, balanced put is done on buffer completion
+	 * for device->host buffer, balanced put is after ringing the DB
+	 */
+	mhi_cntrl->runtime_get(mhi_cntrl);
 
 	/* Assert dev_wake (to exit/prevent M1/M2)*/
 	mhi_cntrl->wake_toggle(mhi_cntrl);
@@ -1014,12 +1127,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (mhi_chan->dir == DMA_TO_DEVICE)
 		atomic_inc(&mhi_cntrl->pending_pkts);
 
-	if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		ret = -EIO;
-		goto exit_unlock;
-	}
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 
-	mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+	if (dir == DMA_FROM_DEVICE)
+		mhi_cntrl->runtime_put(mhi_cntrl);
 
 exit_unlock:
 	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
@@ -1162,6 +1274,11 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
 		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
 		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
 		break;
+	case MHI_CMD_STOP_CHAN:
+		cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
+		cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
+		cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
+		break;
 	case MHI_CMD_START_CHAN:
 		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
 		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
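Two client-visible changes fall out of this group of hunks: a full transfer ring now yields -EAGAIN instead of -ENOMEM, and every queued buffer takes a runtime PM usage reference that is dropped on completion (host to device) or right after ringing the doorbell (device to host). A client transmit path could react to the new return code as in this sketch (the netdev plumbing is an illustrative assumption):

	ret = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (ret == -EAGAIN) {
		/* TRE ring full: stop and retry once completions free space */
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}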
@@ -1183,56 +1300,125 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
 	return 0;
 }
 
-static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
-				    struct mhi_chan *mhi_chan)
+static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
+				    struct mhi_chan *mhi_chan,
+				    enum mhi_ch_state_type to_state)
 {
+	struct device *dev = &mhi_chan->mhi_dev->dev;
+	enum mhi_cmd_type cmd = MHI_CMD_NOP;
 	int ret;
-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-
-	dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
 
-	/* no more processing events for this channel */
-	mutex_lock(&mhi_chan->mutex);
-	write_lock_irq(&mhi_chan->lock);
-	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
-	    mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
+	dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
+		TO_CH_STATE_TYPE_STR(to_state));
+
+	switch (to_state) {
+	case MHI_CH_STATE_TYPE_RESET:
+		write_lock_irq(&mhi_chan->lock);
+		if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
+		    mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
+		    mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
+			write_unlock_irq(&mhi_chan->lock);
+			return -EINVAL;
+		}
+		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
 		write_unlock_irq(&mhi_chan->lock);
-		mutex_unlock(&mhi_chan->mutex);
-		return;
+
+		cmd = MHI_CMD_RESET_CHAN;
+		break;
+	case MHI_CH_STATE_TYPE_STOP:
+		if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+			return -EINVAL;
+
+		cmd = MHI_CMD_STOP_CHAN;
+		break;
+	case MHI_CH_STATE_TYPE_START:
+		if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
+		    mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
+			return -EINVAL;
+
+		cmd = MHI_CMD_START_CHAN;
+		break;
+	default:
+		dev_err(dev, "%d: Channel state update to %s not allowed\n",
+			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+		return -EINVAL;
 	}
 
-	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
-	write_unlock_irq(&mhi_chan->lock);
+	/* bring host and device out of suspended states */
+	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
+	if (ret)
+		return ret;
+	mhi_cntrl->runtime_get(mhi_cntrl);
 
 	reinit_completion(&mhi_chan->completion);
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		goto error_invalid_state;
+	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
+	if (ret) {
+		dev_err(dev, "%d: Failed to send %s channel command\n",
+			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+		goto exit_channel_update;
 	}
 
-	mhi_cntrl->wake_toggle(mhi_cntrl);
-	read_unlock_bh(&mhi_cntrl->pm_lock);
+	ret = wait_for_completion_timeout(&mhi_chan->completion,
+				msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
+		dev_err(dev,
+			"%d: Failed to receive %s channel command completion\n",
+			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+		ret = -EIO;
+		goto exit_channel_update;
+	}
 
-	mhi_cntrl->runtime_get(mhi_cntrl);
+	ret = 0;
+
+	if (to_state != MHI_CH_STATE_TYPE_RESET) {
+		write_lock_irq(&mhi_chan->lock);
+		mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
+			MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
+		write_unlock_irq(&mhi_chan->lock);
+	}
+
+	dev_dbg(dev, "%d: Channel state change to %s successful\n",
+		mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+
+exit_channel_update:
 	mhi_cntrl->runtime_put(mhi_cntrl);
-	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
+	mhi_device_put(mhi_cntrl->mhi_dev);
+
+	return ret;
+}
+
+static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+				  struct mhi_chan *mhi_chan)
+{
+	int ret;
+	struct device *dev = &mhi_chan->mhi_dev->dev;
+
+	mutex_lock(&mhi_chan->mutex);
+
+	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+		dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
+		goto exit_unprepare_channel;
+	}
+
+	/* no more processing events for this channel */
+	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+				       MHI_CH_STATE_TYPE_RESET);
 	if (ret)
-		goto error_invalid_state;
+		dev_err(dev, "%d: Failed to reset channel, still resetting\n",
+			mhi_chan->chan);
 
-	/* even if it fails we will still reset */
-	ret = wait_for_completion_timeout(&mhi_chan->completion,
-				msecs_to_jiffies(mhi_cntrl->timeout_ms));
-	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
-		dev_err(dev,
-			"Failed to receive cmd completion, still resetting\n");
+exit_unprepare_channel:
+	write_lock_irq(&mhi_chan->lock);
+	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+	write_unlock_irq(&mhi_chan->lock);
 
-error_invalid_state:
 	if (!mhi_chan->offload_ch) {
 		mhi_reset_chan(mhi_cntrl, mhi_chan);
 		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
 	}
-	dev_dbg(dev, "chan:%d successfully resetted\n", mhi_chan->chan);
+	dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
 
 	mutex_unlock(&mhi_chan->mutex);
 }
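mhi_update_channel_state() folds the previously open-coded send-command/wait-for-completion sequence into one state machine that first validates the requested transition against the current channel state. Stopping and restarting a channel then reduces to a pair of calls, roughly (an illustrative pairing, not a hunk from this diff):

	/* Quiesce a channel, then bring it back without a full reset */
	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
				       MHI_CH_STATE_TYPE_STOP);
	if (!ret)
		ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
					       MHI_CH_STATE_TYPE_START);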
@@ -1240,28 +1426,16 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 		       struct mhi_chan *mhi_chan)
 {
 	int ret = 0;
-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-
-	dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
+	struct device *dev = &mhi_chan->mhi_dev->dev;
 
 	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
-		dev_err(dev,
-			"Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
-			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
-			mhi_chan->name);
+		dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
 		return -ENOTCONN;
 	}
 
 	mutex_lock(&mhi_chan->mutex);
 
-	/* If channel is not in disable state, do not allow it to start */
-	if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
-		ret = -EIO;
-		dev_dbg(dev, "channel: %d is not in disabled state\n",
-			mhi_chan->chan);
-		goto error_init_chan;
-	}
-
 	/* Check of client manages channel context for offload channels */
 	if (!mhi_chan->offload_ch) {
 		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
@@ -1269,34 +1443,11 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 			goto error_init_chan;
 	}
 
-	reinit_completion(&mhi_chan->completion);
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		ret = -EIO;
-		goto error_pm_state;
-	}
-
-	mhi_cntrl->wake_toggle(mhi_cntrl);
-	read_unlock_bh(&mhi_cntrl->pm_lock);
-	mhi_cntrl->runtime_get(mhi_cntrl);
-	mhi_cntrl->runtime_put(mhi_cntrl);
-
-	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
+	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+				       MHI_CH_STATE_TYPE_START);
 	if (ret)
 		goto error_pm_state;
 
-	ret = wait_for_completion_timeout(&mhi_chan->completion,
-				msecs_to_jiffies(mhi_cntrl->timeout_ms));
-	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
-		ret = -EIO;
-		goto error_pm_state;
-	}
-
-	write_lock_irq(&mhi_chan->lock);
-	mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
-	write_unlock_irq(&mhi_chan->lock);
-
 	/* Pre-allocate buffer for xfer ring */
 	if (mhi_chan->pre_alloc) {
 		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
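mhi_prepare_channel() now simply requests a START transition, which also lets a channel parked in STOP state be restarted without a reset. The client-facing entry point is unchanged; bring-up still looks like (sketch):

	ret = mhi_prepare_for_transfer(mhi_dev);
	if (ret) {
		dev_err(&mhi_dev->dev, "Failed to start channels: %d\n", ret);
		return ret;
	}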
@@ -1334,9 +1485,6 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 
 	mutex_unlock(&mhi_chan->mutex);
 
-	dev_dbg(dev, "Chan: %d successfully moved to start state\n",
-		mhi_chan->chan);
-
 	return 0;
 
 error_pm_state:
@@ -1350,7 +1498,7 @@ error_init_chan:
 
 error_pre_alloc:
 	mutex_unlock(&mhi_chan->mutex);
-	__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+	mhi_unprepare_channel(mhi_cntrl, mhi_chan);
 
 	return ret;
 }
@@ -1365,6 +1513,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
 	struct mhi_ring *ev_ring;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	unsigned long flags;
+	dma_addr_t ptr;
 
 	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
 
@@ -1372,7 +1521,15 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
 
 	/* mark all stale events related to channel as STALE event */
 	spin_lock_irqsave(&mhi_event->lock, flags);
-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+	ptr = er_ctxt->rp;
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		dev_rp = ev_ring->rp;
+	} else {
+		dev_rp = mhi_to_virtual(ev_ring, ptr);
+	}
 
 	local_rp = ev_ring->rp;
 	while (dev_rp != local_rp) {
@@ -1403,8 +1560,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
 	while (tre_ring->rp != tre_ring->wp) {
 		struct mhi_buf_info *buf_info = buf_ring->rp;
 
-		if (mhi_chan->dir == DMA_TO_DEVICE)
+		if (mhi_chan->dir == DMA_TO_DEVICE) {
 			atomic_dec(&mhi_cntrl->pending_pkts);
+			/* Release the reference got from mhi_queue() */
+			mhi_cntrl->runtime_put(mhi_cntrl);
+		}
 
 		if (!buf_info->pre_mapped)
 			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
@@ -1467,7 +1627,7 @@ error_open_chan:
 		if (!mhi_chan)
 			continue;
 
-		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+		mhi_unprepare_channel(mhi_cntrl, mhi_chan);
 	}
 
 	return ret;
@@ -1485,7 +1645,7 @@ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
 		if (!mhi_chan)
 			continue;
 
-		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+		mhi_unprepare_channel(mhi_cntrl, mhi_chan);
 	}
 }
 EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
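Taken together, the reset paths above drop one runtime PM reference per in-flight host-to-device buffer, so the get taken in mhi_queue() stays balanced even when a channel is torn down with transfers pending. The overall client lifecycle against these APIs, as a rough sketch (buffer management and completion handling elided):

	ret = mhi_prepare_for_transfer(mhi_dev);	/* starts both channels */
	if (ret)
		return ret;

	ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
	if (ret == -EAGAIN)
		return ret;	/* ring full: requeue later, not fatal */

	/* transfer completions arrive via the client's xfer_cb */
	mhi_unprepare_from_transfer(mhi_dev);	/* resets both channels */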