Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r--  drivers/scsi/ufs/Kconfig                1
-rw-r--r--  drivers/scsi/ufs/cdns-pltfrm.c          2
-rw-r--r--  drivers/scsi/ufs/tc-dwc-g210-pci.c      2
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.c          6
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.h          2
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.c          31
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.h          26
-rw-r--r--  drivers/scsi/ufs/ufs-hisi.c             4
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c        45
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c             2
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c          269
-rw-r--r--  drivers/scsi/ufs/ufs_bsg.c              6
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c          36
-rw-r--r--  drivers/scsi/ufs/ufshcd.c            1163
-rw-r--r--  drivers/scsi/ufs/ufshcd.h              82
-rw-r--r--  drivers/scsi/ufs/ufshci.h               1
16 files changed, 1168 insertions, 510 deletions
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 07cf415367b4..2d137953e7b4 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -115,6 +115,7 @@ config SCSI_UFS_MEDIATEK
tristate "Mediatek specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
select PHY_MTK_UFS
+ select RESET_TI_SYSCON
help
This selects the Mediatek specific additions to UFSHCD platform driver.
UFS host on Mediatek needs some vendor specific configuration before
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 13d92043e13b..908ff39c4856 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -323,6 +323,8 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver cdns_ufs_pltfrm_driver = {
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
index 67a6a61154b7..ec4589afbc13 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -148,6 +148,8 @@ static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
.runtime_suspend = tc_dwc_g210_pci_runtime_suspend,
.runtime_resume = tc_dwc_g210_pci_runtime_resume,
.runtime_idle = tc_dwc_g210_pci_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static const struct pci_device_id tc_dwc_g210_pci_tbl[] = {
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index ced9ef4d7c78..4e1ff209b933 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -13,7 +13,7 @@ void __init ufs_debugfs_init(void)
ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
}
-void __exit ufs_debugfs_exit(void)
+void ufs_debugfs_exit(void)
{
debugfs_remove_recursive(ufs_debugfs_root);
}
@@ -60,14 +60,14 @@ __acquires(&hba->host_sem)
up(&hba->host_sem);
return -EBUSY;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
return 0;
}
static void ufs_debugfs_put_user_access(struct ufs_hba *hba)
__releases(&hba->host_sem)
{
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
up(&hba->host_sem);
}
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/scsi/ufs/ufs-debugfs.h
index 3ca29d30460a..97548a3f90eb 100644
--- a/drivers/scsi/ufs/ufs-debugfs.h
+++ b/drivers/scsi/ufs/ufs-debugfs.h
@@ -9,7 +9,7 @@ struct ufs_hba;
#ifdef CONFIG_DEBUG_FS
void __init ufs_debugfs_init(void);
-void __exit ufs_debugfs_exit(void);
+void ufs_debugfs_exit(void);
void ufs_debugfs_hba_init(struct ufs_hba *hba);
void ufs_debugfs_hba_exit(struct ufs_hba *hba);
void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status);
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index 70647eacf195..cf46d6f86e0e 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -107,6 +107,7 @@ enum {
#define CNTR_DIV_VAL 40
+static struct exynos_ufs_drv_data exynos_ufs_drvs;
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);
@@ -1048,7 +1049,7 @@ static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
exynos_ufs_ungate_clks(ufs);
if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
- const unsigned int granularity_tbl[] = {
+ static const unsigned int granularity_tbl[] = {
1, 4, 8, 16, 32, 100
};
int h8_time = attr->pa_hibern8time *
@@ -1231,8 +1232,32 @@ static int exynos_ufs_remove(struct platform_device *pdev)
return 0;
}
-struct exynos_ufs_drv_data exynos_ufs_drvs = {
+static struct exynos_ufs_uic_attr exynos7_uic_attr = {
+ .tx_trailingclks = 0x10,
+ .tx_dif_p_nsec = 3000000, /* unit: ns */
+ .tx_dif_n_nsec = 1000000, /* unit: ns */
+ .tx_high_z_cnt_nsec = 20000, /* unit: ns */
+ .tx_base_unit_nsec = 100000, /* unit: ns */
+ .tx_gran_unit_nsec = 4000, /* unit: ns */
+ .tx_sleep_cnt = 1000, /* unit: ns */
+ .tx_min_activatetime = 0xa,
+ .rx_filler_enable = 0x2,
+ .rx_dif_p_nsec = 1000000, /* unit: ns */
+ .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
+ .rx_base_unit_nsec = 100000, /* unit: ns */
+ .rx_gran_unit_nsec = 4000, /* unit: ns */
+ .rx_sleep_cnt = 1280, /* unit: ns */
+ .rx_stall_cnt = 320, /* unit: ns */
+ .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
+ .pa_dbg_option_suite = 0x30103,
+};
+static struct exynos_ufs_drv_data exynos_ufs_drvs = {
.compatible = "samsung,exynos7-ufs",
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
@@ -1267,6 +1292,8 @@ static const struct dev_pm_ops exynos_ufs_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver exynos_ufs_pltform = {
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
index 06ee565f7eb0..67505fe32ebf 100644
--- a/drivers/scsi/ufs/ufs-exynos.h
+++ b/drivers/scsi/ufs/ufs-exynos.h
@@ -245,30 +245,4 @@ static inline void exynos_ufs_disable_dbg_mode(struct ufs_hba *hba)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), FALSE);
}
-struct exynos_ufs_drv_data exynos_ufs_drvs;
-
-struct exynos_ufs_uic_attr exynos7_uic_attr = {
- .tx_trailingclks = 0x10,
- .tx_dif_p_nsec = 3000000, /* unit: ns */
- .tx_dif_n_nsec = 1000000, /* unit: ns */
- .tx_high_z_cnt_nsec = 20000, /* unit: ns */
- .tx_base_unit_nsec = 100000, /* unit: ns */
- .tx_gran_unit_nsec = 4000, /* unit: ns */
- .tx_sleep_cnt = 1000, /* unit: ns */
- .tx_min_activatetime = 0xa,
- .rx_filler_enable = 0x2,
- .rx_dif_p_nsec = 1000000, /* unit: ns */
- .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
- .rx_base_unit_nsec = 100000, /* unit: ns */
- .rx_gran_unit_nsec = 4000, /* unit: ns */
- .rx_sleep_cnt = 1280, /* unit: ns */
- .rx_stall_cnt = 320, /* unit: ns */
- .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
- .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
- .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
- .pa_dbg_option_suite = 0x30103,
-};
#endif /* _UFS_EXYNOS_H_ */
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index d0626773eb38..5b147a48161b 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -400,7 +400,7 @@ static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
- if (ufshcd_is_runtime_pm(pm_op))
+ if (pm_op == UFS_RUNTIME_PM)
return 0;
if (host->in_suspend) {
@@ -577,6 +577,8 @@ static const struct dev_pm_ops ufs_hisi_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_hisi_pltform = {
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 0a84ec9e7cea..d2c251628a05 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -822,12 +822,10 @@ static int ufs_mtk_post_link(struct ufs_hba *hba)
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
- /* configure auto-hibern8 timer to 10ms */
- if (ufshcd_is_auto_hibern8_supported(hba)) {
- ufshcd_auto_hibern8_update(hba,
- FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
- }
+ /* will be configured during probe hba */
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
ufs_mtk_setup_clk_gating(hba);
@@ -858,6 +856,9 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
struct arm_smccc_res res;
+ /* disable hba before device reset */
+ ufshcd_hba_stop(hba);
+
ufs_mtk_device_reset_ctrl(0, res);
/*
@@ -1084,12 +1085,42 @@ static int ufs_mtk_probe(struct platform_device *pdev)
{
int err;
struct device *dev = &pdev->dev;
+ struct device_node *reset_node;
+ struct platform_device *reset_pdev;
+ struct device_link *link;
+
+ reset_node = of_find_compatible_node(NULL, NULL,
+ "ti,syscon-reset");
+ if (!reset_node) {
+ dev_notice(dev, "find ti,syscon-reset fail\n");
+ goto skip_reset;
+ }
+ reset_pdev = of_find_device_by_node(reset_node);
+ if (!reset_pdev) {
+ dev_notice(dev, "find reset_pdev fail\n");
+ goto skip_reset;
+ }
+ link = device_link_add(dev, &reset_pdev->dev,
+ DL_FLAG_AUTOPROBE_CONSUMER);
+ if (!link) {
+ dev_notice(dev, "add reset device_link fail\n");
+ goto skip_reset;
+ }
+ /* supplier is not probed */
+ if (link->status == DL_STATE_DORMANT) {
+ err = -EPROBE_DEFER;
+ goto out;
+ }
+skip_reset:
/* perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
+
+out:
if (err)
dev_info(dev, "probe failed %d\n", err);
+ of_node_put(reset_node);
return err;
}
@@ -1114,6 +1145,8 @@ static const struct dev_pm_ops ufs_mtk_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_mtk_pltform = {
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 2a3dd21da6a6..9b1d18d7c9bb 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1551,6 +1551,8 @@ static const struct dev_pm_ops ufs_qcom_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_qcom_pltform = {
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 5d0e98a05ada..52bd807f7940 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -245,9 +245,9 @@ static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
goto out;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
res = ufshcd_wb_toggle(hba, wb_enable);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
out:
up(&hba->host_sem);
return res < 0 ? res : count;
@@ -278,6 +278,242 @@ static const struct attribute_group ufs_sysfs_default_group = {
.attrs = ufs_sysfs_ufshcd_attrs,
};
+static ssize_t monitor_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->monitor.enabled);
+}
+
+static ssize_t monitor_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long value, flags;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (value == hba->monitor.enabled)
+ goto out_unlock;
+
+ if (!value) {
+ memset(&hba->monitor, 0, sizeof(hba->monitor));
+ } else {
+ hba->monitor.enabled = true;
+ hba->monitor.enabled_ts = ktime_get();
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t monitor_chunk_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size);
+}
+
+static ssize_t monitor_chunk_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long value, flags;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /* Only allow chunk size change when monitor is disabled */
+ if (!hba->monitor.enabled)
+ hba->monitor.chunk_size = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t read_total_sectors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]);
+}
+
+static ssize_t read_total_busy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.total_busy[READ]));
+}
+
+static ssize_t read_nr_requests_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]);
+}
+
+static ssize_t read_req_latency_avg_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_hba_monitor *m = &hba->monitor;
+
+ return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
+ m->nr_req[READ]));
+}
+
+static ssize_t read_req_latency_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_max[READ]));
+}
+
+static ssize_t read_req_latency_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_min[READ]));
+}
+
+static ssize_t read_req_latency_sum_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_sum[READ]));
+}
+
+static ssize_t write_total_sectors_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]);
+}
+
+static ssize_t write_total_busy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.total_busy[WRITE]));
+}
+
+static ssize_t write_nr_requests_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]);
+}
+
+static ssize_t write_req_latency_avg_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_hba_monitor *m = &hba->monitor;
+
+ return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
+ m->nr_req[WRITE]));
+}
+
+static ssize_t write_req_latency_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_max[WRITE]));
+}
+
+static ssize_t write_req_latency_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_min[WRITE]));
+}
+
+static ssize_t write_req_latency_sum_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_sum[WRITE]));
+}
+
+static DEVICE_ATTR_RW(monitor_enable);
+static DEVICE_ATTR_RW(monitor_chunk_size);
+static DEVICE_ATTR_RO(read_total_sectors);
+static DEVICE_ATTR_RO(read_total_busy);
+static DEVICE_ATTR_RO(read_nr_requests);
+static DEVICE_ATTR_RO(read_req_latency_avg);
+static DEVICE_ATTR_RO(read_req_latency_max);
+static DEVICE_ATTR_RO(read_req_latency_min);
+static DEVICE_ATTR_RO(read_req_latency_sum);
+static DEVICE_ATTR_RO(write_total_sectors);
+static DEVICE_ATTR_RO(write_total_busy);
+static DEVICE_ATTR_RO(write_nr_requests);
+static DEVICE_ATTR_RO(write_req_latency_avg);
+static DEVICE_ATTR_RO(write_req_latency_max);
+static DEVICE_ATTR_RO(write_req_latency_min);
+static DEVICE_ATTR_RO(write_req_latency_sum);
+
+static struct attribute *ufs_sysfs_monitor_attrs[] = {
+ &dev_attr_monitor_enable.attr,
+ &dev_attr_monitor_chunk_size.attr,
+ &dev_attr_read_total_sectors.attr,
+ &dev_attr_read_total_busy.attr,
+ &dev_attr_read_nr_requests.attr,
+ &dev_attr_read_req_latency_avg.attr,
+ &dev_attr_read_req_latency_max.attr,
+ &dev_attr_read_req_latency_min.attr,
+ &dev_attr_read_req_latency_sum.attr,
+ &dev_attr_write_total_sectors.attr,
+ &dev_attr_write_total_busy.attr,
+ &dev_attr_write_nr_requests.attr,
+ &dev_attr_write_req_latency_avg.attr,
+ &dev_attr_write_req_latency_max.attr,
+ &dev_attr_write_req_latency_min.attr,
+ &dev_attr_write_req_latency_sum.attr,
+ NULL
+};
+
+static const struct attribute_group ufs_sysfs_monitor_group = {
+ .name = "monitor",
+ .attrs = ufs_sysfs_monitor_attrs,
+};
+
static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
u8 desc_index,
@@ -297,10 +533,10 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
goto out;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
param_offset, desc_buf, param_size);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (ret) {
ret = -EINVAL;
goto out;
@@ -678,7 +914,7 @@ static ssize_t _name##_show(struct device *dev, \
up(&hba->host_sem); \
return -ENOMEM; \
} \
- pm_runtime_get_sync(hba->dev); \
+ ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_descriptor_retry(hba, \
UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
0, 0, desc_buf, &desc_len); \
@@ -695,7 +931,7 @@ static ssize_t _name##_show(struct device *dev, \
goto out; \
ret = sysfs_emit(buf, "%s\n", desc_buf); \
out: \
- pm_runtime_put_sync(hba->dev); \
+ ufshcd_rpm_put_sync(hba); \
kfree(desc_buf); \
up(&hba->host_sem); \
return ret; \
@@ -724,8 +960,8 @@ static const struct attribute_group ufs_sysfs_string_descriptors_group = {
static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
{
- return ((idn >= QUERY_FLAG_IDN_WB_EN) &&
- (idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8));
+ return idn >= QUERY_FLAG_IDN_WB_EN &&
+ idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8;
}
#define UFS_FLAG(_name, _uname) \
@@ -744,10 +980,10 @@ static ssize_t _name##_show(struct device *dev, \
} \
if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
- pm_runtime_get_sync(hba->dev); \
+ ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
QUERY_FLAG_IDN##_uname, index, &flag); \
- pm_runtime_put_sync(hba->dev); \
+ ufshcd_rpm_put_sync(hba); \
if (ret) { \
ret = -EINVAL; \
goto out; \
@@ -793,8 +1029,8 @@ static const struct attribute_group ufs_sysfs_flags_group = {
static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
{
- return ((idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS) &&
- (idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE));
+ return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
+ idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}
#define UFS_ATTRIBUTE(_name, _uname) \
@@ -813,10 +1049,10 @@ static ssize_t _name##_show(struct device *dev, \
} \
if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
- pm_runtime_get_sync(hba->dev); \
+ ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
QUERY_ATTR_IDN##_uname, index, 0, &value); \
- pm_runtime_put_sync(hba->dev); \
+ ufshcd_rpm_put_sync(hba); \
if (ret) { \
ret = -EINVAL; \
goto out; \
@@ -881,6 +1117,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
+ &ufs_sysfs_monitor_group,
&ufs_sysfs_device_descriptor_group,
&ufs_sysfs_interconnect_descriptor_group,
&ufs_sysfs_geometry_descriptor_group,
@@ -964,10 +1201,10 @@ static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
goto out;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (ret) {
ret = -EINVAL;
goto out;
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index 5b2bc1a6f922..39bf204c6ec3 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -97,7 +97,7 @@ static int ufs_bsg_request(struct bsg_job *job)
bsg_reply->reply_payload_rcv_len = 0;
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
msgcode = bsg_request->msgcode;
switch (msgcode) {
@@ -106,7 +106,7 @@ static int ufs_bsg_request(struct bsg_job *job)
ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
&desc_len, desc_op);
if (ret) {
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
goto out;
}
@@ -138,7 +138,7 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
}
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (!desc_buff)
goto out;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 23ee828747e2..e6c334bfb4c2 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -410,29 +410,6 @@ static int ufshcd_pci_resume(struct device *dev)
return ufshcd_system_resume(dev_get_drvdata(dev));
}
-/**
- * ufshcd_pci_poweroff - suspend-to-disk poweroff function
- * @dev: pointer to PCI device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-static int ufshcd_pci_poweroff(struct device *dev)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- int spm_lvl = hba->spm_lvl;
- int ret;
-
- /*
- * For poweroff we need to set the UFS device to PowerDown mode.
- * Force spm_lvl to ensure that.
- */
- hba->spm_lvl = 5;
- ret = ufshcd_system_suspend(hba);
- hba->spm_lvl = spm_lvl;
- return ret;
-}
-
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
@@ -533,17 +510,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend = ufshcd_pci_suspend,
- .resume = ufshcd_pci_resume,
- .freeze = ufshcd_pci_suspend,
- .thaw = ufshcd_pci_resume,
- .poweroff = ufshcd_pci_poweroff,
- .restore = ufshcd_pci_resume,
-#endif
SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
ufshcd_pci_runtime_resume,
ufshcd_pci_runtime_idle)
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, ufshcd_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+#endif
};
static const struct pci_device_id ufshcd_pci_tbl[] = {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 72fd41bfbd54..b87ff68aa9aa 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -16,6 +16,7 @@
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
+#include <scsi/scsi_driver.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
@@ -24,6 +25,7 @@
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>
+#include "../sd.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -77,6 +79,8 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+#define wlun_dev_to_hba(dv) shost_priv(to_scsi_device(dv)->host)
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -157,17 +161,17 @@ enum {
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
- {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
- {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
- {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+ [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
/*
* For DeepSleep, the link is first put in hibern8 and then off.
* Leaving the link in hibern8 is not supported.
*/
- {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
+ [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
@@ -298,11 +302,17 @@ static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+ struct utp_upiu_header *header;
if (!trace_ufshcd_upiu_enabled())
return;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq->header, &rq->sc.cdb,
+ if (str_t == UFS_CMD_SEND)
+ header = &rq->header;
+ else
+ header = &hba->lrb[tag].ucd_rsp_ptr->header;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
UFS_TSF_CDB);
}
@@ -361,41 +371,40 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
- sector_t lba = -1;
+ u64 lba = -1;
u8 opcode = 0, group_id = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
int transfer_len = -1;
+ if (!cmd)
+ return;
+
if (!trace_ufshcd_command_enabled()) {
/* trace UPIU W/O tracing command */
- if (cmd)
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
return;
}
- if (cmd) { /* data phase exists */
- /* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
- opcode = cmd->cmnd[0];
- if ((opcode == READ_10) || (opcode == WRITE_10)) {
- /*
- * Currently we only fully trace read(10) and write(10)
- * commands
- */
- if (cmd->request && cmd->request->bio)
- lba = cmd->request->bio->bi_iter.bi_sector;
- transfer_len = be32_to_cpu(
- lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
- if (opcode == WRITE_10)
- group_id = lrbp->cmd->cmnd[6];
- } else if (opcode == UNMAP) {
- if (cmd->request) {
- lba = scsi_get_lba(cmd);
- transfer_len = blk_rq_bytes(cmd->request);
- }
- }
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ opcode = cmd->cmnd[0];
+ lba = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
+
+ if (opcode == READ_10 || opcode == WRITE_10) {
+ /*
+ * Currently we only fully trace read(10) and write(10) commands
+ */
+ transfer_len =
+ be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ if (opcode == WRITE_10)
+ group_id = lrbp->cmd->cmnd[6];
+ } else if (opcode == UNMAP) {
+ /*
+ * The number of Bytes to be unmapped beginning with the lba.
+ */
+ transfer_len = blk_rq_bytes(cmd->request);
}
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
@@ -755,7 +764,7 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
*/
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
- __clear_bit(tag, &hba->outstanding_reqs);
+ clear_bit(tag, &hba->outstanding_reqs);
}
/**
@@ -1551,7 +1560,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
if (value == hba->clk_scaling.is_enabled)
goto out;
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ufshcd_hold(hba, false);
hba->clk_scaling.is_enabled = value;
@@ -1567,7 +1576,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
}
ufshcd_release(hba);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
out:
up(&hba->host_sem);
return err ? err : count;
@@ -1981,15 +1990,19 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
ktime_t curr_t = ktime_get();
+ unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
+ spin_lock_irqsave(hba->host->host_lock, flags);
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
return;
+ }
if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
@@ -2005,22 +2018,91 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
hba->clk_scaling.busy_start_t = curr_t;
hba->clk_scaling.is_busy_started = true;
}
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+ unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_scaling.active_reqs--;
if (!hba->outstanding_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static inline int ufshcd_monitor_opcode2dir(u8 opcode)
+{
+ if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
+ return READ;
+ else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
+ return WRITE;
+ else
+ return -EINVAL;
+}
+
+static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
+{
+ struct ufs_hba_monitor *m = &hba->monitor;
+
+ return (m->enabled && lrbp && lrbp->cmd &&
+ (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
+ ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
+}
+
+static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
+ hba->monitor.busy_start_ts[dir] = ktime_get();
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
+ struct request *req = lrbp->cmd->request;
+ struct ufs_hba_monitor *m = &hba->monitor;
+ ktime_t now, inc, lat;
+
+ now = lrbp->compl_time_stamp;
+ inc = ktime_sub(now, m->busy_start_ts[dir]);
+ m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
+ m->nr_sec_rw[dir] += blk_rq_sectors(req);
+
+ /* Update latencies */
+ m->nr_req[dir]++;
+ lat = ktime_sub(now, lrbp->issue_time_stamp);
+ m->lat_sum[dir] += lat;
+ if (m->lat_max[dir] < lat || !m->lat_max[dir])
+ m->lat_max[dir] = lat;
+ if (m->lat_min[dir] > lat || !m->lat_min[dir])
+ m->lat_min[dir] = lat;
+
+ m->nr_queued[dir]--;
+ /* Push forward the busy start of monitor */
+ m->busy_start_ts[dir] = now;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
}
+
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
@@ -2036,8 +2118,21 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
- __set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_start_monitor(hba, lrbp);
+ if (ufshcd_has_utrlcnr(hba)) {
+ set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ } else {
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
/* Make sure that doorbell is committed immediately */
wmb();
}
@@ -2565,6 +2660,17 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
+static inline bool is_rpmb_wlun(struct scsi_device *sdev)
+{
+ return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);
+}
+
+static inline bool is_device_wlun(struct scsi_device *sdev)
+{
+ return sdev->lun ==
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
+}
+
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
@@ -2597,7 +2703,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct ufshcd_lrb *lrbp;
struct ufs_hba *hba;
- unsigned long flags;
int tag;
int err = 0;
@@ -2614,6 +2719,43 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
+ switch (hba->ufshcd_state) {
+ case UFSHCD_STATE_OPERATIONAL:
+ case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
+ break;
+ case UFSHCD_STATE_EH_SCHEDULED_FATAL:
+ /*
+ * pm_runtime_get_sync() is used at error handling preparation
+ * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
+ * PM ops, it can never be finished if we let SCSI layer keep
+ * retrying it, which gets err handler stuck forever. Neither
+ * can we let the scsi cmd pass through, because UFS is in bad
+ * state, the scsi cmd may eventually time out, which will get
+ * err handler blocked for too long. So, just fail the scsi cmd
+ * sent from PM ops, err handler can recover PM error anyways.
+ */
+ if (hba->pm_op_in_progress) {
+ hba->force_reset = true;
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out;
+ }
+ fallthrough;
+ case UFSHCD_STATE_RESET:
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ case UFSHCD_STATE_ERROR:
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out;
+ default:
+ dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+ __func__, hba->ufshcd_state);
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out;
+ }
+
hba->req_abort_count = 0;
err = ufshcd_hold(hba, true);
@@ -2624,8 +2766,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
(hba->clk_gating.state != CLKS_ON));
- lrbp = &hba->lrb[tag];
- if (unlikely(lrbp->in_use)) {
+ if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
if (hba->pm_op_in_progress)
set_host_byte(cmd, DID_BAD_TARGET);
else
@@ -2634,6 +2775,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
+ lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
lrbp->sense_bufflen = UFS_SENSE_SIZE;
@@ -2657,51 +2799,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- switch (hba->ufshcd_state) {
- case UFSHCD_STATE_OPERATIONAL:
- case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
- break;
- case UFSHCD_STATE_EH_SCHEDULED_FATAL:
- /*
- * pm_runtime_get_sync() is used at error handling preparation
- * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
- * PM ops, it can never be finished if we let SCSI layer keep
- * retrying it, which gets err handler stuck forever. Neither
- * can we let the scsi cmd pass through, because UFS is in bad
- * state, the scsi cmd may eventually time out, which will get
- * err handler blocked for too long. So, just fail the scsi cmd
- * sent from PM ops, err handler can recover PM error anyways.
- */
- if (hba->pm_op_in_progress) {
- hba->force_reset = true;
- set_host_byte(cmd, DID_BAD_TARGET);
- goto out_compl_cmd;
- }
- fallthrough;
- case UFSHCD_STATE_RESET:
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out_compl_cmd;
- case UFSHCD_STATE_ERROR:
- set_host_byte(cmd, DID_ERROR);
- goto out_compl_cmd;
- default:
- dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
- __func__, hba->ufshcd_state);
- set_host_byte(cmd, DID_BAD_TARGET);
- goto out_compl_cmd;
- }
ufshcd_send_command(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- goto out;
-
-out_compl_cmd:
- scsi_dma_unmap(lrbp->cmd);
- lrbp->cmd = NULL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_release(hba);
- if (!err)
- cmd->scsi_done(cmd);
out:
up_read(&hba->clk_scaling_lock);
return err;
@@ -2735,7 +2833,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
- * wait for for h/w to clear corresponding bit in door-bell.
+ * wait for h/w to clear corresponding bit in door-bell.
* max. wait is 1 sec.
*/
err = ufshcd_wait_for_register(hba,
@@ -2856,7 +2954,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
int err;
int tag;
struct completion wait;
- unsigned long flags;
down_read(&hba->clk_scaling_lock);
@@ -2876,34 +2973,30 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
req->timeout = msecs_to_jiffies(2 * timeout);
blk_mq_start_request(req);
- init_completion(&wait);
- lrbp = &hba->lrb[tag];
- if (unlikely(lrbp->in_use)) {
+ if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
- goto out_put_tag;
+ goto out;
hba->dev_cmd.complete = &wait;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_send_command(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
-
-out:
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
-out_put_tag:
+out:
blk_put_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
@@ -4101,12 +4194,13 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (update && !pm_runtime_suspended(hba->dev)) {
- pm_runtime_get_sync(hba->dev);
+ if (update &&
+ !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) {
+ ufshcd_rpm_get_sync(hba);
ufshcd_hold(hba, false);
ufshcd_auto_hibern8_enable(hba);
ufshcd_release(hba);
- pm_runtime_put(hba->dev);
+ ufshcd_rpm_put_sync(hba);
}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
@@ -4420,7 +4514,7 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
* ufshcd_hba_stop - Send controller to reset state
* @hba: per adapter instance
*/
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+void ufshcd_hba_stop(struct ufs_hba *hba)
{
unsigned long flags;
int err;
@@ -4439,6 +4533,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
+EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
/**
* ufshcd_hba_execute_hce - initialize the controller
@@ -4804,6 +4899,43 @@ static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
}
/**
+ * ufshcd_setup_links - associate link b/w device wlun and other luns
+ * @sdev: pointer to SCSI device
+ * @hba: pointer to ufs hba
+ */
+static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ struct device_link *link;
+
+ /*
+ * Device wlun is the supplier & rest of the luns are consumers.
+ * This ensures that device wlun suspends after all other luns.
+ */
+ if (hba->sdev_ufs_device) {
+ link = device_link_add(&sdev->sdev_gendev,
+ &hba->sdev_ufs_device->sdev_gendev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
+ dev_name(&hba->sdev_ufs_device->sdev_gendev));
+ return;
+ }
+ hba->luns_avail--;
+ /* Ignore REPORT_LUN wlun probing */
+ if (hba->luns_avail == 1) {
+ ufshcd_rpm_put(hba);
+ return;
+ }
+ } else {
+ /*
+ * Device wlun is probed. The assumption is that WLUNs are
+ * scanned before other LUNs.
+ */
+ hba->luns_avail--;
+ }
+}
+
+/**
* ufshcd_slave_alloc - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
@@ -4834,6 +4966,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
ufshcd_get_lu_power_on_wp_status(hba, sdev);
+ ufshcd_setup_links(hba, sdev);
+
return 0;
}
@@ -4865,8 +4999,13 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
-
- if (ufshcd_is_rpm_autosuspend_allowed(hba))
+ /*
+ * Block runtime-pm until all consumers are added.
+ * Refer ufshcd_setup_links().
+ */
+ if (is_device_wlun(sdev))
+ pm_runtime_get_noresume(&sdev->sdev_gendev);
+ else if (ufshcd_is_rpm_autosuspend_allowed(hba))
sdev->rpm_autosuspend = 1;
ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
@@ -4982,15 +5121,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
*/
if (!hba->pm_op_in_progress &&
!ufshcd_eh_in_progress(hba) &&
- ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
- schedule_work(&hba->eeh_work)) {
- /*
- * Prevent suspend once eeh_work is scheduled
- * to avoid deadlock between ufshcd_suspend
- * and exception event handler.
- */
- pm_runtime_get_noresume(hba->dev);
- }
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ /* Flushed in suspend */
+ schedule_work(&hba->eeh_work);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -5037,6 +5170,24 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return result;
}
+static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
+ u32 intr_mask)
+{
+ if (!ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufshcd_is_auto_hibern8_enabled(hba))
+ return false;
+
+ if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
+ return false;
+
+ if (hba->active_uic_cmd &&
+ (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
+ hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
+ return false;
+
+ return true;
+}
+
/**
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
@@ -5050,6 +5201,10 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
+ spin_lock(hba->host->host_lock);
+ if (ufshcd_is_auto_hibern8_error(hba, intr_status))
+ hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
+
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
@@ -5070,6 +5225,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
if (retval == IRQ_HANDLED)
ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
UFS_CMD_COMP);
+ spin_unlock(hba->host->host_lock);
return retval;
}
@@ -5088,11 +5244,14 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
+ if (!test_and_clear_bit(index, &hba->outstanding_reqs))
+ continue;
lrbp = &hba->lrb[index];
- lrbp->in_use = false;
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
if (cmd) {
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
@@ -5101,7 +5260,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp->cmd = NULL;
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
- __ufshcd_release(hba);
+ ufshcd_release(hba);
update_scaling = true;
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
@@ -5112,28 +5271,23 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
update_scaling = true;
}
}
- if (ufshcd_is_clkscaling_supported(hba) && update_scaling)
- hba->clk_scaling.active_reqs--;
+ if (update_scaling)
+ ufshcd_clk_scaling_update_busy(hba);
}
-
- /* clear corresponding bits of completed commands */
- hba->outstanding_reqs ^= completed_reqs;
-
- ufshcd_clk_scaling_update_busy(hba);
}
/**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * ufshcd_trc_handler - handle transfer requests completion
* @hba: per adapter instance
+ * @use_utrlcnr: get completed requests from UTRLCNR
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
{
- unsigned long completed_reqs;
- u32 tr_doorbell;
+ unsigned long completed_reqs = 0;
/* Resetting interrupt aggregation counters first and reading the
* DOOR_BELL afterward allows us to handle all the completed requests.
@@ -5146,8 +5300,24 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
!(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+ if (use_utrlcnr) {
+ u32 utrlcnr;
+
+ utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
+ if (utrlcnr) {
+ ufshcd_writel(hba, utrlcnr,
+ REG_UTP_TRANSFER_REQ_LIST_COMPL);
+ completed_reqs = utrlcnr;
+ }
+ } else {
+ unsigned long flags;
+ u32 tr_doorbell;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
if (completed_reqs) {
__ufshcd_transfer_req_compl(hba, completed_reqs);
@@ -5589,8 +5759,8 @@ static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
* after a certain delay to recheck the threshold by next runtime
* suspend.
*/
- pm_runtime_get_sync(hba->dev);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
+ ufshcd_rpm_put_sync(hba);
}
/**
@@ -5607,7 +5777,6 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
u32 status = 0;
hba = container_of(work, struct ufs_hba, eeh_work);
- pm_runtime_get_sync(hba->dev);
ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
@@ -5624,21 +5793,13 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufs_debugfs_exception_event(hba, status);
out:
ufshcd_scsi_unblock_requests(hba);
- /*
- * pm_runtime_get_noresume is called while scheduling
- * eeh_work to avoid suspend racing with exception work.
- * Hence decrement usage counter using pm_runtime_put_noidle
- * to allow suspend on completion of exception event handler.
- */
- pm_runtime_put_noidle(hba->dev);
- pm_runtime_put(hba->dev);
return;
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
- ufshcd_transfer_req_compl(hba);
+ ufshcd_trc_handler(hba, false);
ufshcd_tmc_handler(hba);
}
@@ -5756,12 +5917,13 @@ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
- pm_runtime_get_sync(hba->dev);
- if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) {
+ ufshcd_rpm_get_sync(hba);
+ if (pm_runtime_status_suspended(&hba->sdev_ufs_device->sdev_gendev) ||
+ hba->is_sys_suspended) {
enum ufs_pm_op pm_op;
/*
- * Don't assume anything of pm_runtime_get_sync(), if
+ * Don't assume anything of resume, if
* resume fails, irq and clocks can be OFF, and powers
* can be OFF or in LPM.
*/
@@ -5797,12 +5959,13 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
ufshcd_clear_ua_wluns(hba);
- pm_runtime_put(hba->dev);
+ ufshcd_rpm_put(hba);
}
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
return (!hba->is_powered || hba->shutting_down ||
+ !hba->sdev_ufs_device ||
hba->ufshcd_state == UFSHCD_STATE_ERROR ||
(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
ufshcd_is_link_broken(hba))));
@@ -5818,14 +5981,18 @@ static void ufshcd_recover_pm_error(struct ufs_hba *hba)
hba->is_sys_suspended = false;
/*
- * Set RPM status of hba device to RPM_ACTIVE,
+ * Set RPM status of wlun device to RPM_ACTIVE,
* this also clears its runtime error.
*/
- ret = pm_runtime_set_active(hba->dev);
+ ret = pm_runtime_set_active(&hba->sdev_ufs_device->sdev_gendev);
+
+ /* hba device might have a runtime error otherwise */
+ if (ret)
+ ret = pm_runtime_set_active(hba->dev);
/*
- * If hba device had runtime error, we also need to resume those
- * scsi devices under hba in case any of them has failed to be
- * resumed due to hba runtime resume failure. This is to unblock
+ * If wlun device had runtime error, we also need to resume those
+ * consumer scsi devices in case any of them has failed to be
+ * resumed due to supplier runtime resume failure. This is to unblock
* blk_queue_enter in case there are bios waiting inside it.
*/
if (!ret) {
@@ -5887,13 +6054,11 @@ static void ufshcd_err_handler(struct work_struct *work)
ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_err_handling_prepare(hba);
+ /* Complete requests that have door-bell cleared by h/w */
+ ufshcd_complete_requests(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_RESET;
-
- /* Complete requests that have door-bell cleared by h/w */
- ufshcd_complete_requests(hba);
-
/*
* A full reset and restore might have happened after preparation
* is finished, double check whether we should stop.
@@ -5976,12 +6141,11 @@ static void ufshcd_err_handler(struct work_struct *work)
}
lock_skip_pending_xfer_clear:
- spin_lock_irqsave(hba->host->host_lock, flags);
-
/* Complete the requests that are cleared by s/w */
ufshcd_complete_requests(hba);
- hba->silence_err_logs = false;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->silence_err_logs = false;
if (err_xfer || err_tm) {
needs_reset = true;
goto do_reset;
@@ -6014,19 +6178,6 @@ lock_skip_pending_xfer_clear:
do_reset:
/* Fatal errors need reset */
if (needs_reset) {
- unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
-
- /*
- * ufshcd_reset_and_restore() does the link reinitialization
- * which will need atleast one empty doorbell slot to send the
- * device management commands (NOP and query commands).
- * If there is no slot empty at this moment then free up last
- * slot forcefully.
- */
- if (hba->outstanding_reqs == max_doorbells)
- __ufshcd_transfer_req_compl(hba,
- (1UL << (hba->nutrs - 1)));
-
hba->force_reset = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_reset_and_restore(hba);
@@ -6144,37 +6295,23 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
return retval;
}
-static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
- u32 intr_mask)
-{
- if (!ufshcd_is_auto_hibern8_supported(hba) ||
- !ufshcd_is_auto_hibern8_enabled(hba))
- return false;
-
- if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
- return false;
-
- if (hba->active_uic_cmd &&
- (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
- hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
- return false;
-
- return true;
-}
-
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
+ * @intr_status: interrupt status generated by the controller
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
{
bool queue_eh_work = false;
irqreturn_t retval = IRQ_NONE;
+ spin_lock(hba->host->host_lock);
+ hba->errors |= UFSHCD_ERROR_MASK & intr_status;
+
if (hba->errors & INT_FATAL_ERRORS) {
ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
hba->errors);
@@ -6229,6 +6366,9 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
+ hba->errors = 0;
+ hba->uic_error = 0;
+ spin_unlock(hba->host->host_lock);
return retval;
}
@@ -6263,13 +6403,17 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
*/
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
+ unsigned long flags;
struct request_queue *q = hba->tmf_queue;
struct ctm_info ci = {
.hba = hba,
- .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
};
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
}
@@ -6286,22 +6430,17 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
- hba->errors = UFSHCD_ERROR_MASK & intr_status;
-
- if (ufshcd_is_auto_hibern8_error(hba, intr_status))
- hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
-
- if (hba->errors)
- retval |= ufshcd_check_errors(hba);
-
if (intr_status & UFSHCD_UIC_MASK)
retval |= ufshcd_uic_cmd_compl(hba, intr_status);
+ if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
+ retval |= ufshcd_check_errors(hba, intr_status);
+
if (intr_status & UTP_TASK_REQ_COMPL)
retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- retval |= ufshcd_transfer_req_compl(hba);
+ retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
return retval;
}
@@ -6322,7 +6461,6 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
- spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
hba->ufs_stats.last_intr_status = intr_status;
hba->ufs_stats.last_intr_ts = ktime_get();
@@ -6344,7 +6482,8 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
}
if (enabled_intr_status && retval == IRQ_NONE &&
- !ufshcd_eh_in_progress(hba)) {
+ (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
+ hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
__func__,
intr_status,
@@ -6353,7 +6492,6 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
- spin_unlock(hba->host->host_lock);
return retval;
}
@@ -6530,7 +6668,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
int err = 0;
int tag;
struct completion wait;
- unsigned long flags;
u8 upiu_flags;
down_read(&hba->clk_scaling_lock);
@@ -6543,13 +6680,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
tag = req->tag;
WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
- init_completion(&wait);
- lrbp = &hba->lrb[tag];
- if (unlikely(lrbp->in_use)) {
+ if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
lrbp->sense_bufflen = 0;
@@ -6585,12 +6722,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_send_command(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
/*
* ignore the returning value here - ufshcd_check_query_response is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
@@ -6616,6 +6752,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
err = -EINVAL;
}
}
+ ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
blk_put_request(req);
@@ -6709,7 +6847,6 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
u32 pos;
int err;
u8 resp = 0xF, lun;
- unsigned long flags;
host = cmd->device->host;
hba = shost_priv(host);
@@ -6728,11 +6865,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
+ __ufshcd_transfer_req_compl(hba, pos);
}
}
- spin_lock_irqsave(host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
- spin_unlock_irqrestore(host->host_lock, flags);
out:
hba->req_abort_count = 0;
@@ -6909,20 +7044,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* will fail, due to spec violation, scsi err handling next step
* will be to send LU reset which, again, is a spec violation.
* To avoid these unnecessary/illegal steps, first we clean up
- * the lrb taken by this cmd and mark the lrb as in_use, then
- * queue the eh_work and bail.
+ * the lrb taken by this cmd and re-set it in outstanding_reqs,
+ * then queue the eh_work and bail.
*/
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
+ __ufshcd_transfer_req_compl(hba, (1UL << tag));
+ set_bit(tag, &hba->outstanding_reqs);
spin_lock_irqsave(host->host_lock, flags);
- if (lrbp->cmd) {
- __ufshcd_transfer_req_compl(hba, (1UL << tag));
- __set_bit(tag, &hba->outstanding_reqs);
- lrbp->in_use = true;
- hba->force_reset = true;
- ufshcd_schedule_eh_work(hba);
- }
-
+ hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(host->host_lock, flags);
goto out;
}
@@ -6935,9 +7066,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
if (!err) {
cleanup:
- spin_lock_irqsave(host->host_lock, flags);
__ufshcd_transfer_req_compl(hba, (1UL << tag));
- spin_unlock_irqrestore(host->host_lock, flags);
out:
err = SUCCESS;
} else {
@@ -6967,19 +7096,15 @@ out:
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
int err;
- unsigned long flags;
/*
* Stop the host controller and complete the requests
* cleared by h/w
*/
ufshcd_hba_stop(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = true;
ufshcd_complete_requests(hba);
hba->silence_err_logs = false;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
ufshcd_set_clk_freq(hba, true);
@@ -7249,7 +7374,6 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
hba->sdev_ufs_device = NULL;
goto out;
}
- ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
scsi_device_put(hba->sdev_ufs_device);
hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
@@ -7413,6 +7537,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
goto out;
}
+ hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
+ desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
+
ufs_fixup_device_setup(hba);
ufshcd_wb_probe(hba, desc_buf);
@@ -7890,6 +8017,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
hba->wlun_dev_clr_ua = true;
+ hba->wlun_rpmb_clr_ua = true;
/* Gear up to HS gear if supported */
if (hba->max_pwr_info.is_valid) {
@@ -8475,7 +8603,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* handling context.
*/
hba->host->eh_noresume = 1;
- ufshcd_clear_ua_wluns(hba);
+ if (hba->wlun_dev_clr_ua)
+ ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
cmd[4] = pwr_mode << 4;
@@ -8490,7 +8619,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
- if (driver_byte(ret) == DRIVER_SENSE)
+ if (ret > 0 && scsi_sense_valid(&sshdr))
scsi_print_sense_hdr(sdp, NULL, &sshdr);
}
@@ -8650,23 +8779,7 @@ static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
ufshcd_setup_hba_vreg(hba, true);
}
-/**
- * ufshcd_suspend - helper function for suspend operations
- * @hba: per adapter instance
- * @pm_op: desired low power operation type
- *
- * This function will try to put the UFS device and link into low power
- * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
- * (System PM level).
- *
- * If this function is called during shutdown, it will make sure that
- * both UFS device and UFS link is powered off.
- *
- * NOTE: UFS device & link must be active before we enter in this function.
- *
- * Returns 0 for success and non-zero for failure
- */
-static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret = 0;
int check_for_bkops;
@@ -8674,9 +8787,9 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
enum ufs_dev_pwr_mode req_dev_pwr_mode;
enum uic_link_state req_link_state;
- hba->pm_op_in_progress = 1;
- if (!ufshcd_is_shutdown_pm(pm_op)) {
- pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
+ hba->pm_op_in_progress = true;
+ if (pm_op != UFS_SHUTDOWN_PM) {
+ pm_lvl = pm_op == UFS_RUNTIME_PM ?
hba->rpm_lvl : hba->spm_lvl;
req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
@@ -8697,20 +8810,20 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
- goto disable_clks;
+ goto vops_suspend;
}
if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
(req_link_state == hba->uic_link_state))
- goto enable_gating;
+ goto enable_scaling;
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
ret = -EINVAL;
- goto enable_gating;
+ goto enable_scaling;
}
- if (ufshcd_is_runtime_pm(pm_op)) {
+ if (pm_op == UFS_RUNTIME_PM) {
if (ufshcd_can_autobkops_during_suspend(hba)) {
/*
* The device is idle with no requests in the queue,
@@ -8719,7 +8832,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
ret = ufshcd_urgent_bkops(hba);
if (ret)
- goto enable_gating;
+ goto enable_scaling;
} else {
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);
@@ -8740,14 +8853,14 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
flush_work(&hba->eeh_work);
if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
- if (!ufshcd_is_runtime_pm(pm_op))
+ if (pm_op != UFS_RUNTIME_PM)
/* ensure that bkops is disabled */
ufshcd_disable_auto_bkops(hba);
if (!hba->dev_info.b_rpm_dev_flush_capable) {
ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
if (ret)
- goto enable_gating;
+ goto enable_scaling;
}
}
@@ -8760,7 +8873,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto set_dev_active;
-disable_clks:
+vops_suspend:
/*
* Call vendor specific suspend callback. As these callbacks may access
* vendor specific host controller register space call them before the
@@ -8769,28 +8882,9 @@ disable_clks:
ret = ufshcd_vops_suspend(hba, pm_op);
if (ret)
goto set_link_active;
- /*
- * Disable the host irq as host controller as there won't be any
- * host controller transaction expected till resume.
- */
- ufshcd_disable_irq(hba);
-
- ufshcd_setup_clocks(hba, false);
-
- if (ufshcd_is_clkgating_allowed(hba)) {
- hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- }
-
- ufshcd_vreg_set_lpm(hba);
-
- /* Put the host controller in low power mode if possible */
- ufshcd_hba_vreg_set_lpm(hba);
goto out;
set_link_active:
- ufshcd_vreg_set_hpm(hba);
/*
* Device hardware reset is required to exit DeepSleep. Also, for
* DeepSleep, the link is off so host reset and restore will be done
@@ -8812,57 +8906,32 @@ set_dev_active:
}
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
-enable_gating:
+enable_scaling:
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
- hba->clk_gating.is_suspended = false;
hba->dev_info.b_rpm_dev_flush_capable = false;
- ufshcd_clear_ua_wluns(hba);
- ufshcd_release(hba);
out:
if (hba->dev_info.b_rpm_dev_flush_capable) {
schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
}
- hba->pm_op_in_progress = 0;
-
- if (ret)
- ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret);
+ if (ret) {
+ ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
+ hba->clk_gating.is_suspended = false;
+ ufshcd_release(hba);
+ }
+ hba->pm_op_in_progress = false;
return ret;
}
-/**
- * ufshcd_resume - helper function for resume operations
- * @hba: per adapter instance
- * @pm_op: runtime PM or system PM
- *
- * This function basically brings the UFS device, UniPro link and controller
- * to active state.
- *
- * Returns 0 for success and non-zero for failure
- */
-static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret;
- enum uic_link_state old_link_state;
-
- hba->pm_op_in_progress = 1;
- old_link_state = hba->uic_link_state;
-
- ufshcd_hba_vreg_set_hpm(hba);
- ret = ufshcd_vreg_set_hpm(hba);
- if (ret)
- goto out;
-
- /* Make sure clocks are enabled before accessing controller */
- ret = ufshcd_setup_clocks(hba, true);
- if (ret)
- goto disable_vreg;
+ enum uic_link_state old_link_state = hba->uic_link_state;
- /* enable the host irq as host controller would be active soon */
- ufshcd_enable_irq(hba);
+ hba->pm_op_in_progress = true;
/*
* Call vendor specific resume callback. As these callbacks may access
@@ -8871,7 +8940,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
ret = ufshcd_vops_resume(hba, pm_op);
if (ret)
- goto disable_irq_and_vops_clks;
+ goto out;
/* For DeepSleep, the only supported option is to have the link off */
WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
@@ -8919,42 +8988,217 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
- hba->clk_gating.is_suspended = false;
-
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
if (hba->dev_info.b_rpm_dev_flush_capable) {
hba->dev_info.b_rpm_dev_flush_capable = false;
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
- ufshcd_clear_ua_wluns(hba);
-
- /* Schedule clock gating in case of no access to UFS device yet */
- ufshcd_release(hba);
-
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
goto out;
set_old_link_state:
ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
ufshcd_vops_suspend(hba, pm_op);
-disable_irq_and_vops_clks:
+out:
+ if (ret)
+ ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
+ hba->clk_gating.is_suspended = false;
+ ufshcd_release(hba);
+ hba->pm_op_in_progress = false;
+ return ret;
+}
+
+static int ufshcd_wl_runtime_suspend(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+
+ ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
+ if (ret)
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+
+ trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+
+ return ret;
+}
+
+static int ufshcd_wl_runtime_resume(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+
+ ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
+ if (ret)
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+
+ trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_wl_suspend(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+ down(&hba->host_sem);
+
+ if (pm_runtime_suspended(dev))
+ goto out;
+
+ ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
+ if (ret) {
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+ up(&hba->host_sem);
+ }
+
+out:
+ if (!ret)
+ hba->is_sys_suspended = true;
+ trace_ufshcd_wl_suspend(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+
+ return ret;
+}
+
+static int ufshcd_wl_resume(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+
+ if (pm_runtime_suspended(dev))
+ goto out;
+
+ ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
+ if (ret)
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+out:
+ trace_ufshcd_wl_resume(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = false;
+ up(&hba->host_sem);
+ return ret;
+}
+#endif
+
+static void ufshcd_wl_shutdown(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ down(&hba->host_sem);
+ hba->shutting_down = true;
+ up(&hba->host_sem);
+
+ /* Turn on everything while shutting down */
+ ufshcd_rpm_get_sync(hba);
+ scsi_device_quiesce(sdev);
+ shost_for_each_device(sdev, hba->host) {
+ if (sdev == hba->sdev_ufs_device)
+ continue;
+ scsi_device_quiesce(sdev);
+ }
+ __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
+}
+
+/**
+ * ufshcd_suspend - helper function for suspend operations
+ * @hba: per adapter instance
+ *
+ * This function disables irqs, turns off clocks and sets the
+ * vreg and hba-vreg regulators to lpm mode.
+ */
+static int ufshcd_suspend(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!hba->is_powered)
+ return 0;
+ /*
+ * Disable the host irq as there won't be any host controller
+ * transaction expected till resume.
+ */
ufshcd_disable_irq(hba);
- ufshcd_setup_clocks(hba, false);
+ ret = ufshcd_setup_clocks(hba, false);
+ if (ret) {
+ ufshcd_enable_irq(hba);
+ return ret;
+ }
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
+
+ ufshcd_vreg_set_lpm(hba);
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
+ return ret;
+}
+
+/**
+ * ufshcd_resume - helper function for resume operations
+ * @hba: per adapter instance
+ *
+ * This function basically turns on the regulators, clocks and
+ * irqs of the hba.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_resume(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!hba->is_powered)
+ return 0;
+
+ ufshcd_hba_vreg_set_hpm(hba);
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto out;
+
+ /* Make sure clocks are enabled before accessing controller */
+ ret = ufshcd_setup_clocks(hba, true);
+ if (ret)
+ goto disable_vreg;
+
+ /* enable the host irq as host controller would be active soon */
+ ufshcd_enable_irq(hba);
+ goto out;
+
disable_vreg:
ufshcd_vreg_set_lpm(hba);
out:
- hba->pm_op_in_progress = 0;
if (ret)
ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
return ret;
@@ -8973,44 +9217,14 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- down(&hba->host_sem);
-
- if (!hba->is_powered)
- return 0;
-
- cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
-
- if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
- hba->curr_dev_pwr_mode) &&
- (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
- hba->uic_link_state) &&
- pm_runtime_suspended(hba->dev) &&
- !hba->dev_info.b_rpm_dev_flush_capable)
+ if (pm_runtime_suspended(hba->dev))
goto out;
- if (pm_runtime_suspended(hba->dev)) {
- /*
- * UFS device and/or UFS link low power states during runtime
- * suspend seems to be different than what is expected during
- * system suspend. Hence runtime resume the devic & link and
- * let the system suspend low power states to take effect.
- * TODO: If resume takes longer time, we might have optimize
- * it in future by not resuming everything if possible.
- */
- ret = ufshcd_runtime_resume(hba);
- if (ret)
- goto out;
- }
-
- ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
+ ret = ufshcd_suspend(hba);
out:
trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
- if (!ret)
- hba->is_sys_suspended = true;
- else
- up(&hba->host_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
@@ -9027,21 +9241,16 @@ int ufshcd_system_resume(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- if (!hba->is_powered || pm_runtime_suspended(hba->dev))
- /*
- * Let the runtime resume take care of resuming
- * if runtime suspended.
- */
+ if (pm_runtime_suspended(hba->dev))
goto out;
- else
- ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+
+ ret = ufshcd_resume(hba);
+
out:
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
- if (!ret)
- hba->is_sys_suspended = false;
- up(&hba->host_sem);
+
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
@@ -9056,14 +9265,11 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
- int ret = 0;
+ int ret;
ktime_t start = ktime_get();
- if (!hba->is_powered)
- goto out;
- else
- ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
-out:
+ ret = ufshcd_suspend(hba);
+
trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9075,33 +9281,19 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
* ufshcd_runtime_resume - runtime resume routine
* @hba: per adapter instance
*
- * This function basically brings the UFS device, UniPro link and controller
+ * This function basically brings the controller
* to active state. Following operations are done in this function:
*
* 1. Turn on all the controller related clocks
- * 2. Bring the UniPro link out of Hibernate state
- * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
- * to active state.
- * 4. If auto-bkops is enabled on the device, disable it.
- *
- * So following would be the possible power state after this function return
- * successfully:
- * S1: UFS device in Active state with VCC rail ON
- * UniPro link in Active state
- * All the UFS/UniPro controller clocks are ON
- *
- * Returns 0 for success and non-zero for failure
+ * 2. Turn ON VCC rail
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
- int ret = 0;
+ int ret;
ktime_t start = ktime_get();
- if (!hba->is_powered)
- goto out;
- else
- ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
-out:
+ ret = ufshcd_resume(hba);
+
trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9119,30 +9311,20 @@ EXPORT_SYMBOL(ufshcd_runtime_idle);
* ufshcd_shutdown - shutdown routine
* @hba: per adapter instance
*
- * This function would power off both UFS device and UFS link.
+ * This function would turn off both UFS device and UFS hba
+ * regulators. It would also disable clocks.
*
* Returns 0 always to allow force shutdown even in case of errors.
*/
int ufshcd_shutdown(struct ufs_hba *hba)
{
- int ret = 0;
-
- down(&hba->host_sem);
- hba->shutting_down = true;
- up(&hba->host_sem);
-
- if (!hba->is_powered)
- goto out;
-
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
goto out;
pm_runtime_get_sync(hba->dev);
- ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+ ufshcd_suspend(hba);
out:
- if (ret)
- dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
hba->is_powered = false;
/* allow force shutdown even in case of errors */
return 0;
@@ -9156,6 +9338,8 @@ EXPORT_SYMBOL(ufshcd_shutdown);
*/
void ufshcd_remove(struct ufs_hba *hba)
{
+ if (hba->sdev_ufs_device)
+ ufshcd_rpm_get_sync(hba);
ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->tmf_queue);
@@ -9459,15 +9643,180 @@ out_error:
}
EXPORT_SYMBOL_GPL(ufshcd_init);
+void ufshcd_resume_complete(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (hba->complete_put) {
+ ufshcd_rpm_put(hba);
+ hba->complete_put = false;
+ }
+ if (hba->rpmb_complete_put) {
+ ufshcd_rpmb_rpm_put(hba);
+ hba->rpmb_complete_put = false;
+ }
+}
+EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
+
+int ufshcd_suspend_prepare(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+ * SCSI assumes that runtime-pm and system-pm for scsi drivers
+ * are the same. And it doesn't wake up the device for system
+ * suspend if it is runtime suspended. But UFS doesn't follow
+ * that. Refer to ufshcd_resume_complete().
+ */
+ if (hba->sdev_ufs_device) {
+ ret = ufshcd_rpm_get_sync(hba);
+ if (ret < 0 && ret != -EACCES) {
+ ufshcd_rpm_put(hba);
+ return ret;
+ }
+ hba->complete_put = true;
+ }
+ if (hba->sdev_rpmb) {
+ ufshcd_rpmb_rpm_get_sync(hba);
+ hba->rpmb_complete_put = true;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
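The prepare/complete pair exported above is meant to be wired into a host controller driver's dev_pm_ops so that the device and RPMB WLUNs are runtime-resumed before system suspend and released again once resume completes. A minimal sketch, assuming a hypothetical glue driver (example_ufs_pm_ops is not a name from this patch) and the generic helpers declared in ufshcd-pltfrm.h:

#include <linux/pm.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"

/* Illustrative only: example_ufs_pm_ops is not part of this patch. */
static const struct dev_pm_ops example_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pltfrm_suspend, ufshcd_pltfrm_resume)
	SET_RUNTIME_PM_OPS(ufshcd_pltfrm_runtime_suspend,
			   ufshcd_pltfrm_runtime_resume,
			   ufshcd_pltfrm_runtime_idle)
	/* Grab/release the WLUN runtime-PM references around system PM. */
	.prepare	= ufshcd_suspend_prepare,
	.complete	= ufshcd_resume_complete,
};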
+
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_wl_poweroff(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba = shost_priv(sdev->host);
+
+ __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
+ return 0;
+}
+#endif
+
+static int ufshcd_wl_probe(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (!is_device_wlun(sdev))
+ return -ENODEV;
+
+ blk_pm_runtime_init(sdev->request_queue, dev);
+ pm_runtime_set_autosuspend_delay(dev, 0);
+ pm_runtime_allow(dev);
+
+ return 0;
+}
+
+static int ufshcd_wl_remove(struct device *dev)
+{
+ pm_runtime_forbid(dev);
+ return 0;
+}
+
+static const struct dev_pm_ops ufshcd_wl_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_wl_suspend,
+ .resume = ufshcd_wl_resume,
+ .freeze = ufshcd_wl_suspend,
+ .thaw = ufshcd_wl_resume,
+ .poweroff = ufshcd_wl_poweroff,
+ .restore = ufshcd_wl_resume,
+#endif
+ SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
+};
+
+/*
+ * ufs_dev_wlun_template - describes ufs device wlun
+ * ufs-device wlun - used to send pm commands
+ * All luns are consumers of ufs-device wlun.
+ *
+ * Currently, no sd driver is present for wluns.
+ * Hence no specific pm operations are performed.
+ * With ufs design, SSU should be sent to ufs-device wlun.
+ * Hence register a scsi driver for ufs wluns only.
+ */
+static struct scsi_driver ufs_dev_wlun_template = {
+ .gendrv = {
+ .name = "ufs_device_wlun",
+ .owner = THIS_MODULE,
+ .probe = ufshcd_wl_probe,
+ .remove = ufshcd_wl_remove,
+ .pm = &ufshcd_wl_pm_ops,
+ .shutdown = ufshcd_wl_shutdown,
+ },
+};
+
+static int ufshcd_rpmb_probe(struct device *dev)
+{
+ return is_rpmb_wlun(to_scsi_device(dev)) ? 0 : -ENODEV;
+}
+
+static inline int ufshcd_clear_rpmb_uac(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (!hba->wlun_rpmb_clr_ua)
+ return 0;
+ ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
+ if (!ret)
+ hba->wlun_rpmb_clr_ua = 0;
+ return ret;
+}
+
+static int ufshcd_rpmb_resume(struct device *dev)
+{
+ struct ufs_hba *hba = wlun_dev_to_hba(dev);
+
+ if (hba->sdev_rpmb)
+ ufshcd_clear_rpmb_uac(hba);
+ return 0;
+}
+
+static const struct dev_pm_ops ufs_rpmb_pm_ops = {
+ SET_RUNTIME_PM_OPS(NULL, ufshcd_rpmb_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, ufshcd_rpmb_resume)
+};
+
+/* ufs_rpmb_wlun_template - Describes UFS RPMB WLUN. Used only to send UAC. */
+static struct scsi_driver ufs_rpmb_wlun_template = {
+ .gendrv = {
+ .name = "ufs_rpmb_wlun",
+ .owner = THIS_MODULE,
+ .probe = ufshcd_rpmb_probe,
+ .pm = &ufs_rpmb_pm_ops,
+ },
+};
+
static int __init ufshcd_core_init(void)
{
+ int ret;
+
ufs_debugfs_init();
- return 0;
+
+ ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
+ if (ret)
+ goto debugfs_exit;
+
+ ret = scsi_register_driver(&ufs_rpmb_wlun_template.gendrv);
+ if (ret)
+ goto unregister;
+
+ return ret;
+unregister:
+ scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
+debugfs_exit:
+ ufs_debugfs_exit();
+ return ret;
}
static void __exit ufshcd_core_exit(void)
{
ufs_debugfs_exit();
+ scsi_unregister_driver(&ufs_rpmb_wlun_template.gendrv);
+ scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}
module_init(ufshcd_core_init);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 5eb66a8debc7..c98d540ac044 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -72,6 +72,8 @@ enum ufs_event_type {
UFS_EVT_LINK_STARTUP_FAIL,
UFS_EVT_RESUME_ERR,
UFS_EVT_SUSPEND_ERR,
+ UFS_EVT_WL_SUSP_ERR,
+ UFS_EVT_WL_RES_ERR,
/* abnormal events */
UFS_EVT_DEV_RESET,
@@ -106,10 +108,6 @@ enum ufs_pm_op {
UFS_SHUTDOWN_PM,
};
-#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
-#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
-#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
-
/* Host <-> Device UniPro Link state */
enum uic_link_state {
UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
@@ -157,13 +155,13 @@ enum uic_link_state {
* power off.
*/
enum ufs_pm_level {
- UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
- UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
- UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
- UFS_PM_LVL_6, /* UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE */
+ UFS_PM_LVL_0,
+ UFS_PM_LVL_1,
+ UFS_PM_LVL_2,
+ UFS_PM_LVL_3,
+ UFS_PM_LVL_4,
+ UFS_PM_LVL_5,
+ UFS_PM_LVL_6,
UFS_PM_LVL_MAX
};
@@ -195,7 +193,6 @@ struct ufs_pm_lvl_states {
* @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
* @data_unit_num: the data unit number for the first block for inline crypto
* @req_abort_skip: skip request abort task flag
- * @in_use: indicates that this lrb is still in use
*/
struct ufshcd_lrb {
struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -225,7 +222,6 @@ struct ufshcd_lrb {
#endif
bool req_abort_skip;
- bool in_use;
};
/**
@@ -645,6 +641,25 @@ struct ufs_hba_variant_params {
u32 wb_flush_threshold;
};
+struct ufs_hba_monitor {
+ unsigned long chunk_size;
+
+ unsigned long nr_sec_rw[2];
+ ktime_t total_busy[2];
+
+ unsigned long nr_req[2];
+ /* latencies */
+ ktime_t lat_sum[2];
+ ktime_t lat_max[2];
+ ktime_t lat_min[2];
+
+ u32 nr_queued[2];
+ ktime_t busy_start_ts[2];
+
+ ktime_t enabled_ts;
+ bool enabled;
+};
+
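The monitor bookkeeping above keeps per-direction request counts and latency sums in two-element arrays. A minimal sketch of how an average latency could be derived from those fields; the helper name below is hypothetical and the actual consumers live outside the hunks shown here:

#include <linux/ktime.h>
#include <linux/math64.h>
#include "ufshcd.h"

/* Illustrative only: not part of this patch. dir selects the read or
 * write bucket of the monitor arrays.
 */
static u64 example_monitor_avg_lat_us(struct ufs_hba *hba, int dir)
{
	struct ufs_hba_monitor *m = &hba->monitor;

	if (!m->nr_req[dir])
		return 0;
	/* lat_sum[dir] is an accumulated ktime_t; average it per request. */
	return div64_u64(ktime_to_us(m->lat_sum[dir]), m->nr_req[dir]);
}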
/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
@@ -807,6 +822,7 @@ struct ufs_hba {
struct list_head clk_list_head;
bool wlun_dev_clr_ua;
+ bool wlun_rpmb_clr_ua;
/* Number of requests aborts */
int req_abort_count;
@@ -835,6 +851,8 @@ struct ufs_hba {
struct request_queue *bsg_queue;
struct delayed_work rpm_dev_flush_recheck_work;
+ struct ufs_hba_monitor monitor;
+
#ifdef CONFIG_SCSI_UFS_CRYPTO
union ufs_crypto_capabilities crypto_capabilities;
union ufs_crypto_cap_entry *crypto_cap_array;
@@ -846,6 +864,9 @@ struct ufs_hba {
struct delayed_work debugfs_ee_work;
u32 debugfs_ee_rate_limit_ms;
#endif
+ u32 luns_avail;
+ bool complete_put;
+ bool rpmb_complete_put;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -936,7 +957,7 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
-int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
+int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
@@ -947,6 +968,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
+void ufshcd_hba_stop(struct ufs_hba *hba);
static inline void check_upiu_size(void)
{
@@ -1105,6 +1127,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_suspend_prepare(struct device *dev);
+void ufshcd_resume_complete(struct device *dev);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
@@ -1136,6 +1160,11 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
return ufshcd_readl(hba, REG_UFS_VERSION);
}
+static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba)
+{
+ return (hba->ufs_version >= ufshci_version(3, 0));
+}
+
static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
bool up, enum ufs_notify_change_status status)
{
@@ -1309,4 +1338,29 @@ static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba,
&hba->ee_drv_mask, set, clr);
}
+static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
+{
+ return pm_runtime_get_sync(&hba->sdev_ufs_device->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
+{
+ return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_put(struct ufs_hba *hba)
+{
+ return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev);
+}
+
+static inline int ufshcd_rpmb_rpm_get_sync(struct ufs_hba *hba)
+{
+ return pm_runtime_get_sync(&hba->sdev_rpmb->sdev_gendev);
+}
+
+static inline int ufshcd_rpmb_rpm_put(struct ufs_hba *hba)
+{
+ return pm_runtime_put(&hba->sdev_rpmb->sdev_gendev);
+}
+
#endif /* End of Header */
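The ufshcd_rpm_* and ufshcd_rpmb_rpm_* wrappers above route runtime PM through the corresponding WLUN scsi_device rather than the hba's parent device. A minimal usage sketch mirroring the get/put pattern used by the core (including the -EACCES exemption); the function name is hypothetical:

#include <linux/errno.h>
#include "ufshcd.h"

/* Illustrative only: not part of this patch. */
static int example_with_ufs_active(struct ufs_hba *hba)
{
	int ret;

	/* Runtime-resume the UFS device WLUN before touching the device. */
	ret = ufshcd_rpm_get_sync(hba);
	if (ret < 0 && ret != -EACCES) {
		/* get_sync bumps the usage count even on failure. */
		ufshcd_rpm_put_sync(hba);
		return ret;
	}

	/* ... issue query or device management commands here ... */

	ufshcd_rpm_put_sync(hba);
	return 0;
}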
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index de95be5d11d4..5affb1fce5ad 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -39,6 +39,7 @@ enum {
REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
+ REG_UTP_TRANSFER_REQ_LIST_COMPL = 0x64,
REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
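The new REG_UTP_TRANSFER_REQ_LIST_COMPL offset (0x64) is the completion notification register that ufshcd_has_utrlcnr() gates on for UFSHCI 3.0 and later hosts and that ufshcd_trc_handler() consumes. A simplified sketch, assuming the register's write-back-to-clear behaviour and a hypothetical helper name; the real logic is in ufshcd_trc_handler() and is not reproduced here:

#include "ufshcd.h"

/* Illustrative only: not part of this patch. */
static void example_consume_utrlcnr(struct ufs_hba *hba)
{
	u32 completed;

	if (!ufshcd_has_utrlcnr(hba))
		return;	/* pre-3.0 hosts keep polling the doorbell */

	completed = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
	if (completed) {
		/* Write the read value back to clear the notified slots. */
		ufshcd_writel(hba, completed, REG_UTP_TRANSFER_REQ_LIST_COMPL);
		/* ... complete the lrbs whose bits are set in completed ... */
	}
}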