author     Kashyap Desai <kashyap.desai@broadcom.com>    2016-10-21 06:33:31 -0700
committer  Martin K. Petersen <martin.petersen@oracle.com>    2016-11-08 17:29:57 -0500
commit     a1dfd62c1ebce71a62f5de002c694d5a22fb32a1 (patch)
tree       42ca564da7c9a486d43fa7fd4887e984da9f4b11 /drivers/scsi/megaraid/megaraid_sas_base.c
parent     b3e3827bdd329da1c1b5697e74dfcaf51b65885c (diff)
scsi: megaraid_sas: Do not fire DCMDs during PCI shutdown/detach
This patch addresses the issue of the driver firing DCMDs in the PCI
shutdown/detach path irrespective of firmware state. The driver will now
check whether the firmware is in an operational state before firing DCMDs.
If the firmware is in an unrecoverable state or does not become operational
within the specified time, the driver will skip firing DCMDs.

[mkp: fixed typos]

Signed-off-by: Sumit Saxena <sumit.saxena@broadcom.com>
Signed-off-by: Shivasharan Srikanteshwara <shivasharan.srikanteshwara@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
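The core of the change is a bounded polling loop: read the adapter recovery state once per second for a fixed number of seconds, and give up if the controller is in a critical error state or never becomes operational, so that the shutdown/detach paths can skip firing DCMDs instead of blocking on dead firmware. The sketch below is a minimal, self-contained userspace illustration of that pattern, not driver code; all names and values in it (fw_state, FW_OPERATIONAL, read_fw_state, wait_for_operational, the 360-second timeout, the 30-second notice interval) are hypothetical stand-ins, the real implementation being megasas_wait_for_adapter_operational() in the diff below.

/*
 * Minimal sketch of the "wait for operational, else skip" pattern used by
 * the patch. All names here (fw_state, FW_*, read_fw_state,
 * wait_for_operational) are hypothetical illustrations, not driver symbols.
 */
#include <stdio.h>
#include <unistd.h>

enum fw_state {
	FW_OPERATIONAL,
	FW_RESETTING,
	FW_CRITICAL_ERROR,
};

/* Stand-in for reading the adapter recovery state (an atomic in the driver). */
static enum fw_state read_fw_state(void)
{
	/* A real driver would read hardware/driver state here. */
	return FW_OPERATIONAL;
}

/*
 * Return 0 if the firmware becomes operational within max_wait_secs,
 * 1 if it is in a critical error state or the wait times out.
 */
static int wait_for_operational(int max_wait_secs)
{
	int i;

	if (read_fw_state() == FW_CRITICAL_ERROR)
		return 1;

	for (i = 0; i < max_wait_secs; i++) {
		if (read_fw_state() == FW_OPERATIONAL)
			break;

		/* Periodic progress message while a reset is in flight. */
		if (!(i % 30))
			printf("waiting for controller reset to finish\n");

		sleep(1);
	}

	return read_fw_state() == FW_OPERATIONAL ? 0 : 1;
}

int main(void)
{
	/* Mirrors the shutdown path: skip firmware commands if not operational. */
	if (wait_for_operational(360)) {
		printf("skipping firmware commands: controller not operational\n");
		return 0;
	}

	printf("firing flush/shutdown commands\n");
	return 0;
}

Bounding the wait means a shutdown or detach never blocks indefinitely on unrecoverable firmware; once the wait fails, the DCMDs are simply skipped and the rest of the teardown continues.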
Diffstat (limited to 'drivers/scsi/megaraid/megaraid_sas_base.c')
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c  39
1 files changed, 39 insertions, 0 deletions
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d8b1fbd4c8aa..d5410a39d956 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6245,6 +6245,34 @@ fail_reenable_msix:
#define megasas_resume NULL
#endif

+static inline int
+megasas_wait_for_adapter_operational(struct megasas_instance *instance)
+{
+        int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
+        int i;
+
+        if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+                return 1;
+
+        for (i = 0; i < wait_time; i++) {
+                if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
+                        break;
+
+                if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
+                        dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
+
+                msleep(1000);
+        }
+
+        if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+                dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
+                        __func__);
+                return 1;
+        }
+
+        return 0;
+}
+
/**
 * megasas_detach_one - PCI hot"un"plug entry point
 * @pdev: PCI device structure
@@ -6269,9 +6297,14 @@ static void megasas_detach_one(struct pci_dev *pdev)
        if (instance->fw_crash_state != UNAVAILABLE)
                megasas_free_host_crash_buffer(instance);
        scsi_remove_host(instance->host);
+
+        if (megasas_wait_for_adapter_operational(instance))
+                goto skip_firing_dcmds;
+
        megasas_flush_cache(instance);
        megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);

+skip_firing_dcmds:
        /* cancel the delayed work if this work still in queue*/
        if (instance->ev != NULL) {
                struct megasas_aen_event *ev = instance->ev;
@@ -6385,8 +6418,14 @@ static void megasas_shutdown(struct pci_dev *pdev)
        struct megasas_instance *instance = pci_get_drvdata(pdev);

        instance->unload = 1;
+
+        if (megasas_wait_for_adapter_operational(instance))
+                goto skip_firing_dcmds;
+
        megasas_flush_cache(instance);
        megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+skip_firing_dcmds:
        instance->instancet->disable_intr(instance);
        megasas_destroy_irqs(instance);