author | Sebastian Ott <sebott@linux.ibm.com> | 2018-05-16 11:25:21 +0200
---|---|---
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2018-05-25 08:12:50 +0200
commit | 1bcdb5354aee2c0abcd13d912be35ae39a4144b6 (patch) |
tree | 80c2825c2bb008202e437261492229531f4848de /drivers/s390 |
parent | a166c368e7dbc84a5d8f013d9fda99f47f9c9f13 (diff) |
s390/dasd: simplify locking in dasd_times_out
Provide __dasd_cancel_req, a variant that is called with the ccw device
lock already held, to simplify the locking in dasd_times_out. This also
removes the following sparse warning:
context imbalance in 'dasd_times_out' - different lock contexts for basic block
Note: with this change dasd_schedule_device_bh is now called (via
dasd_cancel_req) with the ccw device lock held. But this is already
the case for other code paths.
Signed-off-by: Sebastian Ott <sebott@linux.ibm.com>
Reviewed-by: Stefan Haberland <sth@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
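
The patch is an instance of a common kernel locking idiom: split a function into a `__`-prefixed helper that does the work and assumes the caller already holds the lock, plus a thin public wrapper that acquires and releases it. A caller that already holds the lock, such as the timeout handler here, then calls the helper directly instead of dropping the lock only so the public function can retake it. Below is a minimal userspace sketch of the idiom using a pthread mutex in place of the ccw device lock; all names (demo_device, __cancel_locked, cancel, timeout_handler) are hypothetical illustrations, not code from the DASD driver:

```c
#include <pthread.h>
#include <stdio.h>

struct demo_device {
	pthread_mutex_t lock;
	int busy;
};

/* Does the actual work; the caller must already hold dev->lock. */
static int __cancel_locked(struct demo_device *dev)
{
	if (!dev->busy)
		return -1;	/* nothing to cancel */
	dev->busy = 0;
	return 0;
}

/* Public entry point: takes the lock, then delegates to the helper. */
static int cancel(struct demo_device *dev)
{
	int rc;

	pthread_mutex_lock(&dev->lock);
	rc = __cancel_locked(dev);
	pthread_mutex_unlock(&dev->lock);
	return rc;
}

/*
 * A path that already holds the lock (like dasd_times_out) calls the
 * __locked variant directly instead of unlocking just so that the
 * public wrapper can relock.
 */
static int timeout_handler(struct demo_device *dev)
{
	int rc;

	pthread_mutex_lock(&dev->lock);
	/* ... inspect request state under the lock ... */
	rc = __cancel_locked(dev);
	pthread_mutex_unlock(&dev->lock);
	return rc;
}

int main(void)
{
	struct demo_device dev = { PTHREAD_MUTEX_INITIALIZER, 1 };

	printf("timeout cancel: %d\n", timeout_handler(&dev));
	printf("second cancel: %d\n", cancel(&dev));
	return 0;
}
```

Besides being shorter, this gives every branch of the timeout handler the same lock context, which is exactly what silences the sparse warning quoted above.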
Diffstat (limited to 'drivers/s390')
-rw-r--r-- | drivers/s390/block/dasd.c | 31 |
1 file changed, 17 insertions(+), 14 deletions(-)
```diff
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 04143c08bd6e..f401b4c38cfc 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2569,14 +2569,11 @@ EXPORT_SYMBOL(dasd_sleep_on_immediatly);
  * Cancellation of a request is an asynchronous operation! The calling
  * function has to wait until the request is properly returned via callback.
  */
-int dasd_cancel_req(struct dasd_ccw_req *cqr)
+static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device = cqr->startdev;
-	unsigned long flags;
-	int rc;
+	int rc = 0;
 
-	rc = 0;
-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	switch (cqr->status) {
 	case DASD_CQR_QUEUED:
 		/* request was not started - just set to cleared */
@@ -2596,11 +2593,21 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
 	default: /* already finished or clear pending - do nothing */
 		break;
 	}
-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	dasd_schedule_device_bh(device);
 	return rc;
 }
-EXPORT_SYMBOL(dasd_cancel_req);
+
+int dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device = cqr->startdev;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	rc = __dasd_cancel_req(cqr);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	return rc;
+}
 
 /*
  * SECTION: Operations of the dasd_block layer.
@@ -3082,12 +3089,10 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 	cqr->retries = -1;
 	cqr->intrc = -ETIMEDOUT;
 	if (cqr->status >= DASD_CQR_QUEUED) {
-		spin_unlock(get_ccwdev_lock(device->cdev));
-		rc = dasd_cancel_req(cqr);
+		rc = __dasd_cancel_req(cqr);
 	} else if (cqr->status == DASD_CQR_FILLED ||
 		   cqr->status == DASD_CQR_NEED_ERP) {
 		cqr->status = DASD_CQR_TERMINATED;
-		spin_unlock(get_ccwdev_lock(device->cdev));
 	} else if (cqr->status == DASD_CQR_IN_ERP) {
 		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
 
@@ -3102,9 +3107,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 			searchcqr->retries = -1;
 			searchcqr->intrc = -ETIMEDOUT;
 			if (searchcqr->status >= DASD_CQR_QUEUED) {
-				spin_unlock(get_ccwdev_lock(device->cdev));
-				rc = dasd_cancel_req(searchcqr);
-				spin_lock(get_ccwdev_lock(device->cdev));
+				rc = __dasd_cancel_req(searchcqr);
 			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
 				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
 				searchcqr->status = DASD_CQR_TERMINATED;
@@ -3118,8 +3121,8 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 			}
 			break;
 		}
-		spin_unlock(get_ccwdev_lock(device->cdev));
 	}
+	spin_unlock(get_ccwdev_lock(device->cdev));
 	dasd_schedule_block_bh(block);
 	spin_unlock(&block->queue_lock);
 	spin_unlock_irqrestore(&cqr->dq->lock, flags);
```
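
For reference, the warning quoted in the commit message comes from sparse's context tracking: sparse checks that each basic block of a function is entered and left with a consistent lock count, and the old dasd_times_out released the ccw device lock on some branches but not others. The kernel also provides annotation macros so a function can declare its lock expectations explicitly; the macros in the sketch below are adapted from include/linux/compiler_types.h (active only under sparse, which defines __CHECKER__), while the surrounding struct and functions are hypothetical illustrations, not code from this patch:

```c
#include <pthread.h>

/*
 * Adapted from include/linux/compiler_types.h: sparse defines
 * __CHECKER__ and interprets context(lock, entry, exit); a normal
 * compiler sees empty macros.
 */
#ifdef __CHECKER__
# define __must_hold(x)	__attribute__((context(x, 1, 1)))
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
#endif

/* Hypothetical device, standing in for the ccw device. */
struct demo_dev {
	pthread_mutex_t lock;
	int busy;
};

/*
 * Hypothetical helper in the style of __dasd_cancel_req: the
 * annotation tells sparse the caller enters and leaves with
 * dev->lock held, so the helper's own lock context is balanced.
 */
static int __demo_cancel(struct demo_dev *dev) __must_hold(&dev->lock)
{
	int rc = dev->busy ? 0 : -1;

	dev->busy = 0;
	return rc;
}

int main(void)
{
	struct demo_dev dev = { PTHREAD_MUTEX_INITIALIZER, 1 };
	int rc;

	pthread_mutex_lock(&dev.lock);	/* satisfy the precondition */
	rc = __demo_cancel(&dev);
	pthread_mutex_unlock(&dev.lock);
	return rc;
}
```

Note that the patch itself needs no such annotations: once __dasd_cancel_req neither takes nor drops the lock and dasd_times_out releases it at a single point, the lock contexts balance on their own.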