author     Linus Torvalds <torvalds@linux-foundation.org>  2021-02-22 10:24:58 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-02-22 10:24:58 -0800
commit     bdb39c9509e6d31943cb29dbb6ccd1b64013fb98 (patch)
tree       36bf88ee1db29c69f0e488b7f537b2907ebff095 /drivers/scsi/ufs/ufshcd.c
parent     325b764089c9bef2be45354db4f15e5b12ae406d (diff)
parent     d2aacd36a8e00bc1813841b482e3933acb1ea0b5 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This series consists of the usual driver updates (ufs, ibmvfc,
qla2xxx, hisi_sas, pm80xx) plus the removal of the gdth driver (which
is bound to cause conflicts with a trivial change somewhere).
The only major rework of note is the one from Hannes trying to
clean up our result handling code in the drivers to make it
consistent"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (194 commits)
scsi: MAINTAINERS: Adjust to reflect gdth scsi driver removal
scsi: ufs: Give clk scaling min gear a value
scsi: lpfc: Fix 'physical' typos
scsi: megaraid_mbox: Fix spelling of 'allocated'
scsi: qla2xxx: Simplify the calculation of variables
scsi: message: fusion: Fix 'physical' typos
scsi: target: core: Change ASCQ for residual write
scsi: target: core: Signal WRITE residuals
scsi: target: core: Set residuals for 4Kn devices
scsi: hisi_sas: Add trace FIFO debugfs support
scsi: hisi_sas: Flush workqueue in hisi_sas_v3_remove()
scsi: hisi_sas: Enable debugfs support by default
scsi: hisi_sas: Don't check .nr_hw_queues in hisi_sas_task_prep()
scsi: hisi_sas: Remove deferred probe check in hisi_sas_v2_probe()
scsi: lpfc: Add auto select on IRQ_POLL
scsi: ncr53c8xx: Fix typos
scsi: lpfc: Fix ancient double free
scsi: qla2xxx: Fix some memory corruption
scsi: qla2xxx: Remove redundant NULL check
scsi: megaraid: Fix ifnullfree.cocci warnings
...
Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
-rw-r--r-- | drivers/scsi/ufs/ufshcd.c | 568
1 file changed, 307 insertions, 261 deletions
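The most repeated mechanical change in the diff below swaps the free-form trace strings ("send", "complete", "query_send", ...) for an `enum ufs_trace_str_t` tag and bails out through a tracepoint-enabled check before doing any formatting work. A minimal user-space sketch of that shape, assuming nothing beyond the enum names visible in the diff (the plain boolean stands in for the kernel's `trace_ufshcd_upiu_enabled()` static-key helper; everything else here is illustrative):

```c
/*
 * Sketch of the enum-tagged tracing pattern used by the hunks below.
 * The bool models a tracepoint's "enabled" predicate; in the kernel
 * this is a static-key check, so the disabled path costs almost nothing.
 */
#include <stdbool.h>
#include <stdio.h>

enum ufs_trace_str_t { UFS_CMD_SEND, UFS_CMD_COMP, UFS_QUERY_SEND, UFS_QUERY_COMP };

static bool trace_upiu_enabled; /* stand-in for trace_ufshcd_upiu_enabled() */

static const char *str_of(enum ufs_trace_str_t t)
{
	switch (t) {
	case UFS_CMD_SEND:   return "send";
	case UFS_CMD_COMP:   return "complete";
	case UFS_QUERY_SEND: return "query_send";
	case UFS_QUERY_COMP: return "query_complete";
	}
	return "?";
}

static void add_cmd_upiu_trace(unsigned int tag, enum ufs_trace_str_t str_t)
{
	/* Early-out before any formatting work, as in the diff. */
	if (!trace_upiu_enabled)
		return;
	printf("upiu trace: tag=%u event=%s\n", tag, str_of(str_t));
}

int main(void)
{
	add_cmd_upiu_trace(3, UFS_CMD_SEND);  /* suppressed: tracing off */
	trace_upiu_enabled = true;
	add_cmd_upiu_trace(3, UFS_CMD_COMP);  /* printed */
	return 0;
}
```

The enum also turns the old send-vs-complete decision (`strcmp(str, "send")`) into a plain integer compare (`str_t == UFS_CMD_SEND`), which is the shape the UIC-command hunk below adopts.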
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4c24eb782835..721f55db181f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -20,6 +20,7 @@
 #include "ufs_quirks.h"
 #include "unipro.h"
 #include "ufs-sysfs.h"
+#include "ufs-debugfs.h"
 #include "ufs_bsg.h"
 #include "ufshcd-crypto.h"
 #include <asm/unaligned.h>
@@ -94,6 +95,8 @@
 		       16, 4, buf, __len, false); \
 } while (0)
 
+static bool early_suspend;
+
 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
 		     const char *prefix)
 {
@@ -244,11 +247,8 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
 					 struct ufs_vreg *vreg);
 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
-static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
-static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
 
@@ -306,53 +306,67 @@ static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
 }
 
 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
-				      const char *str)
+				      enum ufs_trace_str_t str_t)
 {
 	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
 
-	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
+	if (!trace_ufshcd_upiu_enabled())
+		return;
+
+	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq->header, &rq->sc.cdb,
+			  UFS_TSF_CDB);
 }
 
-static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
-					const char *str)
+static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
+					enum ufs_trace_str_t str_t,
+					struct utp_upiu_req *rq_rsp)
 {
-	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+	if (!trace_ufshcd_upiu_enabled())
+		return;
 
-	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
+	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
+			  &rq_rsp->qr, UFS_TSF_OSF);
 }
 
 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
-				     const char *str)
+				     enum ufs_trace_str_t str_t)
 {
 	int off = (int)tag - hba->nutrs;
 	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
 
-	trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
-			&descp->input_param1);
+	if (!trace_ufshcd_upiu_enabled())
+		return;
+
+	if (str_t == UFS_TM_SEND)
+		trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->req_header,
+				  &descp->input_param1, UFS_TSF_TM_INPUT);
+	else
+		trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->rsp_header,
+				  &descp->output_param1, UFS_TSF_TM_OUTPUT);
 }
 
 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
 					 struct uic_command *ucmd,
-					 const char *str)
+					 enum ufs_trace_str_t str_t)
 {
 	u32 cmd;
 
 	if (!trace_ufshcd_uic_command_enabled())
 		return;
 
-	if (!strcmp(str, "send"))
+	if (str_t == UFS_CMD_SEND)
 		cmd = ucmd->command;
 	else
 		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
 
-	trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd,
+	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
 				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
 				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
 				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
 }
 
-static void ufshcd_add_command_trace(struct ufs_hba *hba,
-		unsigned int tag, const char *str)
+static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
+				     enum ufs_trace_str_t str_t)
 {
 	sector_t lba = -1;
 	u8 opcode = 0, group_id = 0;
@@ -364,13 +378,13 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
 	if (!trace_ufshcd_command_enabled()) {
 		/* trace UPIU W/O tracing command */
 		if (cmd)
-			ufshcd_add_cmd_upiu_trace(hba, tag, str);
+			ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
 		return;
 	}
 
 	if (cmd) { /* data phase exists */
 		/* trace UPIU also */
-		ufshcd_add_cmd_upiu_trace(hba, tag, str);
+		ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
 		opcode = cmd->cmnd[0];
 		if ((opcode == READ_10) || (opcode == WRITE_10)) {
 			/*
@@ -393,7 +407,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
 	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 
-	trace_ufshcd_command(dev_name(hba->dev), str, tag,
+	trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
 			doorbell, transfer_len, intr, lba, opcode, group_id);
 }
 
@@ -591,8 +605,8 @@ static void ufshcd_device_reset(struct ufs_hba *hba)
 	if (!err) {
 		ufshcd_set_ufs_dev_active(hba);
 		if (ufshcd_is_wb_allowed(hba)) {
-			hba->wb_enabled = false;
-			hba->wb_buf_flush_enabled = false;
+			hba->dev_info.wb_enabled = false;
+			hba->dev_info.wb_buf_flush_enabled = false;
 		}
 	}
 	if (err != -EOPNOTSUPP)
@@ -1182,19 +1196,30 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 	 */
 	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->clk_scaling_lock);
-	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+
+	if (!hba->clk_scaling.is_allowed ||
+	    ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 		ret = -EBUSY;
 		up_write(&hba->clk_scaling_lock);
 		ufshcd_scsi_unblock_requests(hba);
+		goto out;
 	}
 
+	/* let's not get into low power until clock scaling is completed */
+	ufshcd_hold(hba, false);
+
+out:
 	return ret;
 }
 
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
 {
-	up_write(&hba->clk_scaling_lock);
+	if (writelock)
+		up_write(&hba->clk_scaling_lock);
+	else
+		up_read(&hba->clk_scaling_lock);
 	ufshcd_scsi_unblock_requests(hba);
+	ufshcd_release(hba);
 }
 
 /**
@@ -1209,13 +1234,11 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 {
 	int ret = 0;
-
-	/* let's not get into low power until clock scaling is completed */
-	ufshcd_hold(hba, false);
+	bool is_writelock = true;
 
 	ret = ufshcd_clock_scaling_prepare(hba);
 	if (ret)
-		goto out;
+		return ret;
 
 	/* scale down the gear before scaling down clocks */
 	if (!scale_up) {
@@ -1241,14 +1264,12 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 	}
 
 	/* Enable Write Booster if we have scaled up else disable it */
-	up_write(&hba->clk_scaling_lock);
+	downgrade_write(&hba->clk_scaling_lock);
+	is_writelock = false;
 	ufshcd_wb_ctrl(hba, scale_up);
-	down_write(&hba->clk_scaling_lock);
 
 out_unprepare:
-	ufshcd_clock_scaling_unprepare(hba);
-out:
-	ufshcd_release(hba);
+	ufshcd_clock_scaling_unprepare(hba, is_writelock);
 	return ret;
 }
 
@@ -1329,15 +1350,8 @@ static int ufshcd_devfreq_target(struct device *dev,
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
-	pm_runtime_get_noresume(hba->dev);
-	if (!pm_runtime_active(hba->dev)) {
-		pm_runtime_put_noidle(hba->dev);
-		ret = -EAGAIN;
-		goto out;
-	}
 	start = ktime_get();
 	ret = ufshcd_devfreq_scale(hba, scale_up);
-	pm_runtime_put(hba->dev);
 
 	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
 		(scale_up ? "up" : "down"),
@@ -1484,8 +1498,8 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 	unsigned long flags;
 	bool suspend = false;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
+	cancel_work_sync(&hba->clk_scaling.suspend_work);
+	cancel_work_sync(&hba->clk_scaling.resume_work);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (!hba->clk_scaling.is_suspended) {
@@ -1503,9 +1517,6 @@ static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 	unsigned long flags;
 	bool resume = false;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
-
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->clk_scaling.is_suspended) {
 		resume = true;
@@ -1522,7 +1533,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
 }
 
 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
@@ -1530,22 +1541,25 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 	u32 value;
-	int err;
+	int err = 0;
 
 	if (kstrtou32(buf, 0, &value))
 		return -EINVAL;
 
+	down(&hba->host_sem);
+	if (!ufshcd_is_user_access_allowed(hba)) {
+		err = -EBUSY;
+		goto out;
+	}
+
 	value = !!value;
-	if (value == hba->clk_scaling.is_allowed)
+	if (value == hba->clk_scaling.is_enabled)
 		goto out;
 
 	pm_runtime_get_sync(hba->dev);
 	ufshcd_hold(hba, false);
 
-	cancel_work_sync(&hba->clk_scaling.suspend_work);
-	cancel_work_sync(&hba->clk_scaling.resume_work);
-
-	hba->clk_scaling.is_allowed = value;
+	hba->clk_scaling.is_enabled = value;
 
 	if (value) {
 		ufshcd_resume_clkscaling(hba);
@@ -1560,10 +1574,11 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 	ufshcd_release(hba);
 	pm_runtime_put_sync(hba->dev);
 out:
-	return count;
+	up(&hba->host_sem);
+	return err ? err : count;
 }
 
-static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
 {
 	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
 	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
@@ -1574,6 +1589,45 @@ static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
 		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
 }
 
+static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
+{
+	if (hba->clk_scaling.enable_attr.attr.name)
+		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+}
+
+static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+{
+	char wq_name[sizeof("ufs_clkscaling_00")];
+
+	if (!ufshcd_is_clkscaling_supported(hba))
+		return;
+
+	if (!hba->clk_scaling.min_gear)
+		hba->clk_scaling.min_gear = UFS_HS_G1;
+
+	INIT_WORK(&hba->clk_scaling.suspend_work,
+		  ufshcd_clk_scaling_suspend_work);
+	INIT_WORK(&hba->clk_scaling.resume_work,
+		  ufshcd_clk_scaling_resume_work);
+
+	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+		 hba->host->host_no);
+	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+	hba->clk_scaling.is_initialized = true;
+}
+
+static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+{
+	if (!hba->clk_scaling.is_initialized)
+		return;
+
+	ufshcd_remove_clk_scaling_sysfs(hba);
+	destroy_workqueue(hba->clk_scaling.workq);
+	ufshcd_devfreq_remove(hba);
+	hba->clk_scaling.is_initialized = false;
+}
+
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
@@ -1865,35 +1919,31 @@ out:
 	return count;
 }
 
-static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
 {
-	char wq_name[sizeof("ufs_clkscaling_00")];
-
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
-
-	if (!hba->clk_scaling.min_gear)
-		hba->clk_scaling.min_gear = UFS_HS_G1;
-
-	INIT_WORK(&hba->clk_scaling.suspend_work,
-		  ufshcd_clk_scaling_suspend_work);
-	INIT_WORK(&hba->clk_scaling.resume_work,
-		  ufshcd_clk_scaling_resume_work);
-
-	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
-		 hba->host->host_no);
-	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+	hba->clk_gating.delay_attr.attr.mode = 0644;
+	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
 
-	ufshcd_clkscaling_init_sysfs(hba);
+	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
+	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
+	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
+	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
+	hba->clk_gating.enable_attr.attr.mode = 0644;
+	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
 }
 
-static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
 {
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
-
-	destroy_workqueue(hba->clk_scaling.workq);
-	ufshcd_devfreq_remove(hba);
+	if (hba->clk_gating.delay_attr.attr.name)
+		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+	if (hba->clk_gating.enable_attr.attr.name)
+		device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
 }
 
 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
@@ -1914,34 +1964,21 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
 					WQ_MEM_RECLAIM | WQ_HIGHPRI);
 
-	hba->clk_gating.is_enabled = true;
-
-	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
-	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
-	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
-	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
-	hba->clk_gating.delay_attr.attr.mode = 0644;
-	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
-		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+	ufshcd_init_clk_gating_sysfs(hba);
 
-	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
-	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
-	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
-	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
-	hba->clk_gating.enable_attr.attr.mode = 0644;
-	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
-		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
+	hba->clk_gating.is_enabled = true;
+	hba->clk_gating.is_initialized = true;
 }
 
 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 {
-	if (!ufshcd_is_clkgating_allowed(hba))
+	if (!hba->clk_gating.is_initialized)
 		return;
 
-	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
-	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+	ufshcd_remove_clk_gating_sysfs(hba);
 	cancel_work_sync(&hba->clk_gating.ungate_work);
 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 	destroy_workqueue(hba->clk_gating.clk_gating_workq);
+	hba->clk_gating.is_initialized = false;
 }
 
 /* Must be called with host lock acquired */
@@ -1956,7 +1993,7 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 	if (!hba->clk_scaling.active_reqs++)
 		queue_resume_work = true;
 
-	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
 		return;
 
 	if (queue_resume_work)
@@ -2002,7 +2039,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 	lrbp->issue_time_stamp = ktime_get();
 	lrbp->compl_time_stamp = ktime_set(0, 0);
 	ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
-	ufshcd_add_command_trace(hba, task_tag, "send");
+	ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
 	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -2138,7 +2175,7 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
 	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
 
-	ufshcd_add_uic_command_trace(hba, uic_cmd, "send");
+	ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
 
 	/* Write UIC Cmd */
 	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
@@ -2857,7 +2894,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 
 	hba->dev_cmd.complete = &wait;
 
-	ufshcd_add_query_upiu_trace(hba, tag, "query_send");
+	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
 	/* Make sure descriptors are ready before ringing the doorbell */
 	wmb();
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2867,8 +2904,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
 
 out:
-	ufshcd_add_query_upiu_trace(hba, tag,
-			err ? "query_complete_err" : "query_complete");
+	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
 
 out_put_tag:
 	blk_put_request(req);
@@ -3425,7 +3462,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
 	 * Unit descriptors are only available for general purpose LUs (LUN id
 	 * from 0 to 7) and RPMB Well known LU.
 	 */
-	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
+	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
 		return -EOPNOTSUPP;
 
 	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -4218,25 +4255,27 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
 						pwr_mode->hs_rate);
 
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
-			DL_FC0ProtectionTimeOutVal_Default);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
-			DL_TC0ReplayTimeOutVal_Default);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
-			DL_AFC0ReqTimeOutVal_Default);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
-			DL_FC1ProtectionTimeOutVal_Default);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
-			DL_TC1ReplayTimeOutVal_Default);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
-			DL_AFC1ReqTimeOutVal_Default);
-
-	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
-			DL_FC0ProtectionTimeOutVal_Default);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
-			DL_TC0ReplayTimeOutVal_Default);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
-			DL_AFC0ReqTimeOutVal_Default);
+	if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+				DL_FC0ProtectionTimeOutVal_Default);
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+				DL_TC0ReplayTimeOutVal_Default);
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+				DL_AFC0ReqTimeOutVal_Default);
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+				DL_FC1ProtectionTimeOutVal_Default);
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+				DL_TC1ReplayTimeOutVal_Default);
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+				DL_AFC1ReqTimeOutVal_Default);
+
+		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+				DL_FC0ProtectionTimeOutVal_Default);
+		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+				DL_TC0ReplayTimeOutVal_Default);
+		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+				DL_AFC0ReqTimeOutVal_Default);
+	}
 
 	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
 			| pwr_mode->pwr_tx);
@@ -4543,6 +4582,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
 	e = &hba->ufs_stats.event[id];
 	e->val[e->pos] = val;
 	e->tstamp[e->pos] = ktime_get();
+	e->cnt += 1;
 	e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
 
 	ufshcd_vops_event_notify(hba, id, &val);
@@ -4827,6 +4867,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
 	struct request_queue *q = sdev->request_queue;
 
 	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+	if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
+		blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
 
 	if (ufshcd_is_rpm_autosuspend_allowed(hba))
 		sdev->rpm_autosuspend = 1;
@@ -4872,9 +4914,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
 		ufshcd_copy_sense_data(lrbp);
 		fallthrough;
 	case SAM_STAT_GOOD:
-		result |= DID_OK << 16 |
-			  COMMAND_COMPLETE << 8 |
-			  scsi_status;
+		result |= DID_OK << 16 | scsi_status;
 		break;
 	case SAM_STAT_TASK_SET_FULL:
 	case SAM_STAT_BUSY:
@@ -5032,7 +5072,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 
 	if (retval == IRQ_HANDLED)
 		ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
-					     "complete");
+					     UFS_CMD_COMP);
 	return retval;
 }
 
@@ -5056,7 +5096,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 		lrbp->compl_time_stamp = ktime_get();
 		cmd = lrbp->cmd;
 		if (cmd) {
-			ufshcd_add_command_trace(hba, index, "complete");
+			ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
 			result = ufshcd_transfer_rsp_status(hba, lrbp);
 			scsi_dma_unmap(cmd);
 			cmd->result = result;
@@ -5070,7 +5110,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
 			if (hba->dev_cmd.complete) {
 				ufshcd_add_command_trace(hba, index,
-						"dev_complete");
+							 UFS_DEV_COMP);
 				complete(hba->dev_cmd.complete);
 				update_scaling = true;
 			}
@@ -5390,7 +5430,7 @@ out:
 				__func__, err);
 }
 
-static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
 {
 	int ret;
 	u8 index;
@@ -5399,7 +5439,7 @@ static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
 	if (!ufshcd_is_wb_allowed(hba))
 		return 0;
 
-	if (!(enable ^ hba->wb_enabled))
+	if (!(enable ^ hba->dev_info.wb_enabled))
 		return 0;
 	if (enable)
 		opcode = UPIU_QUERY_OPCODE_SET_FLAG;
@@ -5415,7 +5455,7 @@ static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
 		return ret;
 	}
 
-	hba->wb_enabled = enable;
+	hba->dev_info.wb_enabled = enable;
 	dev_dbg(hba->dev, "%s write booster %s %d\n",
 			__func__, enable ? "enable" : "disable", ret);
@@ -5438,58 +5478,37 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
 				index, NULL);
 }
 
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
-{
-	if (enable)
-		ufshcd_wb_buf_flush_enable(hba);
-	else
-		ufshcd_wb_buf_flush_disable(hba);
-
-}
-
-static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
+static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
 {
 	int ret;
 	u8 index;
+	enum query_opcode opcode;
 
-	if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
+	if (!ufshcd_is_wb_allowed(hba) ||
+	    hba->dev_info.wb_buf_flush_enabled == enable)
 		return 0;
 
-	index = ufshcd_wb_get_query_index(hba);
-	ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
-				      QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
-				      index, NULL);
-	if (ret)
-		dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
-			__func__, ret);
+	if (enable)
+		opcode = UPIU_QUERY_OPCODE_SET_FLAG;
 	else
-		hba->wb_buf_flush_enabled = true;
-
-	dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
-	return ret;
-}
-
-static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
-{
-	int ret;
-	u8 index;
-
-	if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
-		return 0;
+		opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
 
 	index = ufshcd_wb_get_query_index(hba);
-	ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
-				      QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
-				      index, NULL);
+	ret = ufshcd_query_flag_retry(hba, opcode,
+				      QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, index,
+				      NULL);
 	if (ret) {
-		dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
-			 __func__, ret);
-	} else {
-		hba->wb_buf_flush_enabled = false;
-		dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
+		dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
+			enable ? "enable" : "disable", ret);
+		goto out;
 	}
 
+	hba->dev_info.wb_buf_flush_enabled = enable;
+
+	dev_dbg(hba->dev, "WB-Buf Flush %s\n", enable ? "enabled" : "disabled");
+out:
 	return ret;
+
 }
 
 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
@@ -5714,6 +5733,26 @@ static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
 	}
 }
 
+static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+{
+	down_write(&hba->clk_scaling_lock);
+	hba->clk_scaling.is_allowed = allow;
+	up_write(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+{
+	if (suspend) {
+		if (hba->clk_scaling.is_enabled)
+			ufshcd_suspend_clkscaling(hba);
+		ufshcd_clk_scaling_allow(hba, false);
+	} else {
+		ufshcd_clk_scaling_allow(hba, true);
+		if (hba->clk_scaling.is_enabled)
+			ufshcd_resume_clkscaling(hba);
+	}
+}
+
 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 {
 	pm_runtime_get_sync(hba->dev);
@@ -5738,27 +5777,27 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 		ufshcd_vops_resume(hba, pm_op);
 	} else {
 		ufshcd_hold(hba, false);
-		if (hba->clk_scaling.is_allowed) {
-			cancel_work_sync(&hba->clk_scaling.suspend_work);
-			cancel_work_sync(&hba->clk_scaling.resume_work);
+		if (ufshcd_is_clkscaling_supported(hba) &&
+		    hba->clk_scaling.is_enabled)
 			ufshcd_suspend_clkscaling(hba);
-		}
+		ufshcd_clk_scaling_allow(hba, false);
 	}
 }
 
 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
 {
 	ufshcd_release(hba);
-	if (hba->clk_scaling.is_allowed)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
 	pm_runtime_put(hba->dev);
 }
 
 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
 {
-	return (!hba->is_powered || hba->ufshcd_state == UFSHCD_STATE_ERROR ||
+	return (!hba->is_powered || hba->shutting_down ||
+		hba->ufshcd_state == UFSHCD_STATE_ERROR ||
 		(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
-			ufshcd_is_link_broken(hba))));
+		   ufshcd_is_link_broken(hba))));
 }
 
 #ifdef CONFIG_PM
@@ -5828,13 +5867,13 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
-	down(&hba->eh_sem);
+	down(&hba->host_sem);
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (ufshcd_err_handling_should_stop(hba)) {
 		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
 			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		up(&hba->eh_sem);
+		up(&hba->host_sem);
 		return;
 	}
 	ufshcd_set_eh_in_progress(hba);
@@ -6003,7 +6042,7 @@ skip_err_handling:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ufshcd_scsi_unblock_requests(hba);
 	ufshcd_err_handling_unprepare(hba);
-	up(&hba->eh_sem);
+	up(&hba->host_sem);
 
 	if (!err && needs_reset)
 		ufshcd_clear_ua_wluns(hba);
@@ -6293,8 +6332,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 	while (intr_status && retries--) {
 		enabled_intr_status =
 			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-		if (intr_status)
-			ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
 		if (enabled_intr_status)
 			retval |= ufshcd_sl_intr(hba, enabled_intr_status);
 
@@ -6380,7 +6418,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 
 	spin_unlock_irqrestore(host->host_lock, flags);
 
-	ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
+	ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
 
 	/* wait until the task management command is completed */
 	err = wait_for_completion_io_timeout(&wait,
@@ -6391,7 +6429,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 		 * use-after-free.
 		 */
 		req->end_io_data = NULL;
-		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
+		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
 		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
 				__func__, tm_function);
 		if (ufshcd_clear_tm_cmd(hba, free_slot))
@@ -6402,7 +6440,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 		err = 0;
 		memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
 
-		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
 	}
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7248,6 +7286,7 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
 	struct ufs_dev_info *dev_info = &hba->dev_info;
 	u8 lun;
 	u32 d_lu_wb_buf_alloc;
+	u32 ext_ufs_feature;
 
 	if (!ufshcd_is_wb_allowed(hba))
 		return;
@@ -7265,30 +7304,25 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
 	    DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
 		goto wb_disabled;
 
-	dev_info->d_ext_ufs_feature_sup =
-		get_unaligned_be32(desc_buf +
-				   DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+	ext_ufs_feature = get_unaligned_be32(desc_buf +
+					DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
 
-	if (!(dev_info->d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
+	if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
 		goto wb_disabled;
 
 	/*
-	 * WB may be supported but not configured while provisioning.
-	 * The spec says, in dedicated wb buffer mode,
-	 * a max of 1 lun would have wb buffer configured.
-	 * Now only shared buffer mode is supported.
+	 * WB may be supported but not configured while provisioning. The spec
+	 * says, in dedicated wb buffer mode, a max of 1 lun would have wb
+	 * buffer configured.
 	 */
-	dev_info->b_wb_buffer_type =
-		desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+	dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
 
 	dev_info->b_presrv_uspc_en =
 		desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
 
-	if (dev_info->b_wb_buffer_type == WB_BUF_MODE_SHARED) {
-		dev_info->d_wb_alloc_units =
-			get_unaligned_be32(desc_buf +
-					   DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
-		if (!dev_info->d_wb_alloc_units)
+	if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
+		if (!get_unaligned_be32(desc_buf +
+				DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
 			goto wb_disabled;
 	} else {
 		for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
@@ -7734,13 +7768,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
 		       &hba->pwr_info,
 		       sizeof(struct ufs_pa_layer_attr));
 		hba->clk_scaling.saved_pwr_info.is_valid = true;
-		if (!hba->devfreq) {
-			ret = ufshcd_devfreq_init(hba);
-			if (ret)
-				goto out;
-		}
-		hba->clk_scaling.is_allowed = true;
+
+		ret = ufshcd_devfreq_init(hba);
+		if (ret)
+			goto out;
+
+		hba->clk_scaling.is_enabled = true;
+		ufshcd_init_clk_scaling_sysfs(hba);
 	}
 
 	ufs_bsg_probe(hba);
@@ -7911,10 +7946,10 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 	struct ufs_hba *hba = (struct ufs_hba *)data;
 	int ret;
 
-	down(&hba->eh_sem);
+	down(&hba->host_sem);
 	/* Initialize hba, detect and initialize UFS device */
 	ret = ufshcd_probe_hba(hba, true);
-	up(&hba->eh_sem);
+	up(&hba->host_sem);
 	if (ret)
 		goto out;
 
@@ -7927,7 +7962,6 @@ out:
 	 */
 	if (ret) {
 		pm_runtime_put_sync(hba->dev);
-		ufshcd_exit_clk_scaling(hba);
 		ufshcd_hba_exit(hba);
 	}
 }
@@ -8339,6 +8373,8 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
 	if (err)
 		goto out_disable_vreg;
 
+	ufs_debugfs_hba_init(hba);
+
 	hba->is_powered = true;
 	goto out;
 
@@ -8355,12 +8391,13 @@ out:
 static void ufshcd_hba_exit(struct ufs_hba *hba)
 {
 	if (hba->is_powered) {
+		ufshcd_exit_clk_scaling(hba);
+		ufshcd_exit_clk_gating(hba);
+		if (hba->eh_wq)
+			destroy_workqueue(hba->eh_wq);
+		ufs_debugfs_hba_exit(hba);
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
-		ufshcd_suspend_clkscaling(hba);
-		if (ufshcd_is_clkscaling_supported(hba))
-			if (hba->devfreq)
-				ufshcd_suspend_clkscaling(hba);
 		ufshcd_setup_clocks(hba, false);
 		ufshcd_setup_hba_vreg(hba, false);
 		hba->is_powered = false;
@@ -8655,11 +8692,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_hold(hba, false);
 	hba->clk_gating.is_suspended = true;
 
-	if (hba->clk_scaling.is_allowed) {
-		cancel_work_sync(&hba->clk_scaling.suspend_work);
-		cancel_work_sync(&hba->clk_scaling.resume_work);
-		ufshcd_suspend_clkscaling(hba);
-	}
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, true);
 
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
 			req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -8726,8 +8760,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		if (ret)
 			goto set_dev_active;
 
-	ufshcd_vreg_set_lpm(hba);
-
 disable_clks:
 	/*
 	 * Call vendor specific suspend callback. As these callbacks may access
@@ -8751,13 +8783,13 @@ disable_clks:
 			hba->clk_gating.state);
 	}
 
+	ufshcd_vreg_set_lpm(hba);
+
 	/* Put the host controller in low power mode if possible */
 	ufshcd_hba_vreg_set_lpm(hba);
 	goto out;
 
 set_link_active:
-	if (hba->clk_scaling.is_allowed)
-		ufshcd_resume_clkscaling(hba);
 	ufshcd_vreg_set_hpm(hba);
 	/*
 	 * Device hardware reset is required to exit DeepSleep. Also, for
@@ -8781,8 +8813,9 @@ set_dev_active:
 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 		ufshcd_disable_auto_bkops(hba);
 enable_gating:
-	if (hba->clk_scaling.is_allowed)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
+
 	hba->clk_gating.is_suspended = false;
 	hba->dev_info.b_rpm_dev_flush_capable = false;
 	ufshcd_clear_ua_wluns(hba);
@@ -8819,18 +8852,18 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	old_link_state = hba->uic_link_state;
 
 	ufshcd_hba_vreg_set_hpm(hba);
+	ret = ufshcd_vreg_set_hpm(hba);
+	if (ret)
+		goto out;
+
 	/* Make sure clocks are enabled before accessing controller */
 	ret = ufshcd_setup_clocks(hba, true);
 	if (ret)
-		goto out;
+		goto disable_vreg;
 
 	/* enable the host irq as host controller would be active soon */
 	ufshcd_enable_irq(hba);
 
-	ret = ufshcd_vreg_set_hpm(hba);
-	if (ret)
-		goto disable_irq_and_vops_clks;
-
 	/*
 	 * Call vendor specific resume callback. As these callbacks may access
 	 * vendor specific host controller register space call them when the
@@ -8838,7 +8871,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	 */
 	ret = ufshcd_vops_resume(hba, pm_op);
 	if (ret)
-		goto disable_vreg;
+		goto disable_irq_and_vops_clks;
 
 	/* For DeepSleep, the only supported option is to have the link off */
 	WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
@@ -8885,8 +8918,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 	hba->clk_gating.is_suspended = false;
 
-	if (hba->clk_scaling.is_allowed)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
 
 	/* Enable Auto-Hibernate if configured */
 	ufshcd_auto_hibern8_enable(hba);
@@ -8907,18 +8940,16 @@ set_old_link_state:
 	ufshcd_link_state_transition(hba, old_link_state, 0);
 vendor_suspend:
 	ufshcd_vops_suspend(hba, pm_op);
-disable_vreg:
-	ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
 	ufshcd_disable_irq(hba);
-	if (hba->clk_scaling.is_allowed)
-		ufshcd_suspend_clkscaling(hba);
 	ufshcd_setup_clocks(hba, false);
 	if (ufshcd_is_clkgating_allowed(hba)) {
 		hba->clk_gating.state = CLKS_OFF;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
 	}
+disable_vreg:
+	ufshcd_vreg_set_lpm(hba);
out:
 	hba->pm_op_in_progress = 0;
 	if (ret)
@@ -8939,8 +8970,14 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
 	int ret = 0;
 	ktime_t start = ktime_get();
 
-	down(&hba->eh_sem);
-	if (!hba || !hba->is_powered)
+	if (!hba) {
+		early_suspend = true;
+		return 0;
+	}
+
+	down(&hba->host_sem);
+
+	if (!hba->is_powered)
 		return 0;
 
 	if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
@@ -8972,7 +9009,7 @@ out:
 	if (!ret)
 		hba->is_sys_suspended = true;
 	else
-		up(&hba->eh_sem);
+		up(&hba->host_sem);
 	return ret;
 }
 EXPORT_SYMBOL(ufshcd_system_suspend);
@@ -8989,9 +9026,12 @@ int ufshcd_system_resume(struct ufs_hba *hba)
 	int ret = 0;
 	ktime_t start = ktime_get();
 
-	if (!hba) {
-		up(&hba->eh_sem);
+	if (!hba)
 		return -EINVAL;
+
+	if (unlikely(early_suspend)) {
+		early_suspend = false;
+		down(&hba->host_sem);
 	}
 
 	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
@@ -9008,7 +9048,7 @@ out:
 		hba->curr_dev_pwr_mode, hba->uic_link_state);
 	if (!ret)
 		hba->is_sys_suspended = false;
-	up(&hba->eh_sem);
+	up(&hba->host_sem);
 	return ret;
 }
 EXPORT_SYMBOL(ufshcd_system_resume);
@@ -9100,7 +9140,10 @@ int ufshcd_shutdown(struct ufs_hba *hba)
 {
 	int ret = 0;
 
-	down(&hba->eh_sem);
+	down(&hba->host_sem);
+	hba->shutting_down = true;
+	up(&hba->host_sem);
+
 	if (!hba->is_powered)
 		goto out;
 
@@ -9114,7 +9157,6 @@ out:
 	if (ret)
 		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
 	hba->is_powered = false;
-	up(&hba->eh_sem);
 	/* allow force shutdown even in case of errors */
 	return 0;
 }
@@ -9133,15 +9175,9 @@ void ufshcd_remove(struct ufs_hba *hba)
 	blk_mq_free_tag_set(&hba->tmf_tag_set);
 	blk_cleanup_queue(hba->cmd_queue);
 	scsi_remove_host(hba->host);
-	destroy_workqueue(hba->eh_wq);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
 	ufshcd_hba_stop(hba);
-
-	ufshcd_exit_clk_scaling(hba);
-	ufshcd_exit_clk_gating(hba);
-	if (ufshcd_is_clkscaling_supported(hba))
-		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
 	ufshcd_hba_exit(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -9309,7 +9345,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
-	sema_init(&hba->eh_sem, 1);
+	sema_init(&hba->host_sem, 1);
 
 	/* Initialize UIC command mutex */
 	mutex_init(&hba->uic_cmd_mutex);
@@ -9341,7 +9377,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
 	if (err) {
 		dev_err(hba->dev, "request irq failed\n");
-		goto exit_gating;
+		goto out_disable;
 	} else {
 		hba->is_irq_enabled = true;
 	}
@@ -9349,7 +9385,7 @@
 	err = scsi_add_host(host, hba->dev);
 	if (err) {
 		dev_err(hba->dev, "scsi_add_host failed\n");
-		goto exit_gating;
+		goto out_disable;
 	}
 
 	hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
@@ -9432,10 +9468,6 @@ free_cmd_queue:
 	blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
 	scsi_remove_host(hba->host);
-exit_gating:
-	ufshcd_exit_clk_scaling(hba);
-	ufshcd_exit_clk_gating(hba);
-	destroy_workqueue(hba->eh_wq);
out_disable:
 	hba->is_irq_enabled = false;
 	ufshcd_hba_exit(hba);
@@ -9444,6 +9476,20 @@ out_error:
 }
 EXPORT_SYMBOL_GPL(ufshcd_init);
 
+static int __init ufshcd_core_init(void)
+{
+	ufs_debugfs_init();
+	return 0;
+}
+
+static void __exit ufshcd_core_exit(void)
+{
+	ufs_debugfs_exit();
+}
+
+module_init(ufshcd_core_init);
+module_exit(ufshcd_core_exit);
+
 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
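A second pattern worth noting in the diff: teardown of clock gating and clock scaling now hides behind `is_initialized` flags, so `ufshcd_exit_clk_gating()` and `ufshcd_exit_clk_scaling()` are safe to reach both from init error paths and from the common `ufshcd_hba_exit()`. A standalone sketch of that idempotent-teardown shape, using plain C stand-ins rather than any kernel API:

```c
/*
 * Sketch of the is_initialized-guarded teardown introduced above:
 * exit functions become idempotent, so a resource owner can call
 * them from every unwind path without double-freeing.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct clk_gating {
	bool is_initialized;
	void *workq;		/* stands in for the gating workqueue */
};

static void init_clk_gating(struct clk_gating *cg)
{
	cg->workq = malloc(64);
	cg->is_initialized = true;
}

static void exit_clk_gating(struct clk_gating *cg)
{
	if (!cg->is_initialized)	/* safe to call more than once */
		return;
	free(cg->workq);
	cg->workq = NULL;
	cg->is_initialized = false;
}

int main(void)
{
	struct clk_gating cg = { 0 };

	init_clk_gating(&cg);
	exit_clk_gating(&cg);	/* e.g. from an error path */
	exit_clk_gating(&cg);	/* later from the common exit: no-op */
	puts("teardown ran exactly once");
	return 0;
}
```

This is why the diff can drop the duplicated cleanup calls from `ufshcd_remove()` and the `exit_gating:` error label: once the guards exist, `ufshcd_hba_exit()` becomes the single owner of the teardown sequence.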