-rw-r--r--  Documentation/block/queue-sysfs.txt |  6
-rw-r--r--  MAINTAINERS                          |  9
-rw-r--r--  block/blk-wbt.c                      | 13
-rw-r--r--  drivers/nvme/host/core.c             | 17
-rw-r--r--  drivers/nvme/host/fc.c               | 19
-rw-r--r--  drivers/nvme/host/nvme.h             |  1
-rw-r--r--  drivers/nvme/host/pci.c              | 13
-rw-r--r--  drivers/nvme/host/scsi.c             | 27
-rw-r--r--  drivers/nvme/target/admin-cmd.c      |  4
-rw-r--r--  drivers/nvme/target/fcloop.c         |  4
-rw-r--r--  fs/block_dev.c                       |  3
-rw-r--r--  fs/buffer.c                          |  2
-rw-r--r--  include/linux/genhd.h                |  9
13 files changed, 39 insertions, 88 deletions
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 51642159aedb..c0a3bb5a6e4e 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -54,9 +54,9 @@ This is the hardware sector size of the device, in bytes.
 
 io_poll (RW)
 ------------
-When read, this file shows the total number of block IO polls and how
-many returned success. Writing '0' to this file will disable polling
-for this device. Writing any non-zero value will enable this feature.
+When read, this file shows whether polling is enabled (1) or disabled
+(0). Writing '0' to this file will disable polling for this device.
+Writing any non-zero value will enable this feature.
 
 io_poll_delay (RW)
 ------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 2775b14f861a..97d0b689270a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8854,17 +8854,22 @@ F:	drivers/video/fbdev/nvidia/
 NVM EXPRESS DRIVER
 M:	Keith Busch <keith.busch@intel.com>
 M:	Jens Axboe <axboe@fb.com>
+M:	Christoph Hellwig <hch@lst.de>
+M:	Sagi Grimberg <sagi@grimberg.me>
 L:	linux-nvme@lists.infradead.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
-W:	https://kernel.googlesource.com/pub/scm/linux/kernel/git/axboe/linux-block/
+T:	git://git.infradead.org/nvme.git
+W:	http://git.infradead.org/nvme.git
 S:	Supported
 F:	drivers/nvme/host/
 F:	include/linux/nvme.h
+F:	include/uapi/linux/nvme_ioctl.h
 
 NVM EXPRESS TARGET DRIVER
 M:	Christoph Hellwig <hch@lst.de>
 M:	Sagi Grimberg <sagi@grimberg.me>
 L:	linux-nvme@lists.infradead.org
+T:	git://git.infradead.org/nvme.git
+W:	http://git.infradead.org/nvme.git
 S:	Supported
 F:	drivers/nvme/target/
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 6e82769f4042..f0a9c07b4c7a 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -544,6 +544,8 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
  * the timer to kick off queuing again.
  */
 static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+	__releases(lock)
+	__acquires(lock)
 {
 	struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
 	DEFINE_WAIT(wait);
@@ -558,13 +560,12 @@ static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
 		if (may_queue(rwb, rqw, &wait, rw))
 			break;
 
-		if (lock)
+		if (lock) {
 			spin_unlock_irq(lock);
-
-		io_schedule();
-
-		if (lock)
+			io_schedule();
 			spin_lock_irq(lock);
+		} else
+			io_schedule();
 	} while (1);
 
 	finish_wait(&rqw->wait, &wait);
@@ -595,7 +596,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
  * in an irq held spinlock, if it holds one when calling this function.
  * If we do sleep, we'll release and re-grab it.
  */
-unsigned int wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
 {
 	unsigned int ret = 0;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index b40cfb076f02..2fc86dc7a8df 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1193,8 +1193,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 	}
-	if (ctrl->stripe_size)
-		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 		vwc = true;
@@ -1250,19 +1250,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	ctrl->max_hw_sectors =
 		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
 
-	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
-		unsigned int max_hw_sectors;
-
-		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
-		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
-		if (ctrl->max_hw_sectors) {
-			ctrl->max_hw_sectors = min(max_hw_sectors,
-						ctrl->max_hw_sectors);
-		} else {
-			ctrl->max_hw_sectors = max_hw_sectors;
-		}
-	}
-
 	nvme_set_queue_limits(ctrl, ctrl->admin_q);
 	ctrl->sgls = le32_to_cpu(id->sgls);
 	ctrl->kas = le16_to_cpu(id->kas);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 771e2e761872..aa0bc60810a7 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1491,19 +1491,20 @@ static int
 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 {
 	struct nvme_fc_queue *queue = &ctrl->queues[1];
-	int i, j, ret;
+	int i, ret;
 
 	for (i = 1; i < ctrl->queue_count; i++, queue++) {
 		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
-		if (ret) {
-			for (j = i-1; j >= 0; j--)
-				__nvme_fc_delete_hw_queue(ctrl,
-						&ctrl->queues[j], j);
-			return ret;
-		}
+		if (ret)
+			goto delete_queues;
 	}
 
 	return 0;
+
+delete_queues:
+	for (; i >= 0; i--)
+		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
+	return ret;
 }
 
 static int
@@ -2401,8 +2402,8 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	WARN_ON_ONCE(!changed);
 
 	dev_info(ctrl->ctrl.device,
-		"NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
-		ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl);
+		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
+		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
 
 	kref_get(&ctrl->ctrl.kref);
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bd5321441d12..6377e14586dc 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -135,7 +135,6 @@ struct nvme_ctrl {
 
 	u32 page_size;
 	u32 max_hw_sectors;
-	u32 stripe_size;
 	u16 oncs;
 	u16 vid;
 	atomic_t abort_limit;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3d21a154dce7..19beeb7b2ac2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -712,15 +712,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
 		nvme_req(req)->result = cqe.result;
 		blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
 	}
-
-	/* If the controller ignores the cq head doorbell and continuously
-	 * writes to the queue, it is theoretically possible to wrap around
-	 * the queue twice and mistakenly return IRQ_NONE. Linux only
-	 * requires that 0.1% of your interrupts are handled, so this isn't
-	 * a big problem.
-	 */
 	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
 		return;
 
@@ -1909,10 +1902,10 @@ static int nvme_dev_map(struct nvme_dev *dev)
 	if (!dev->bar)
 		goto release;
 
-       return 0;
+	return 0;
   release:
-       pci_release_mem_regions(pdev);
-       return -ENODEV;
+	pci_release_mem_regions(pdev);
+	return -ENODEV;
 }
 
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index b71e95044b43..a5c09e703bd8 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -2160,30 +2160,6 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
 	return nvme_trans_status_code(hdr, nvme_sc);
 }
 
-static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-					u8 *cmd)
-{
-	u8 immed, no_flush;
-
-	immed = cmd[1] & 0x01;
-	no_flush = cmd[4] & 0x04;
-
-	if (immed != 0) {
-		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-	} else {
-		if (no_flush == 0) {
-			/* Issue NVME FLUSH command prior to START STOP UNIT */
-			int res = nvme_trans_synchronize_cache(ns, hdr);
-			if (res)
-				return res;
-		}
-
-		return 0;
-	}
-}
-
 static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 							u8 *cmd)
 {
@@ -2439,9 +2415,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
 	case SECURITY_PROTOCOL_OUT:
 		retcode = nvme_trans_security_protocol(ns, hdr, cmd);
 		break;
-	case START_STOP:
-		retcode = nvme_trans_start_stop(ns, hdr, cmd);
-		break;
 	case SYNCHRONIZE_CACHE:
 		retcode = nvme_trans_synchronize_cache(ns, hdr);
 		break;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index ec1ad2aa0a4c..95ae52390478 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -382,7 +382,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 {
 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
-	u64 val;
 	u32 val32;
 	u16 status = 0;
 
@@ -392,8 +391,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
 		break;
 	case NVME_FEAT_KATO:
-		val = le64_to_cpu(req->cmd->prop_set.value);
-		val32 = val & 0xffff;
+		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
 		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
 		nvmet_set_result(req, req->sq->ctrl->kato);
 		break;
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index bcb8ebeb01c5..4e8e6a22bce1 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -845,7 +845,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
 	rport->lport = nport->lport;
 	nport->rport = rport;
 
-	return ret ? ret : count;
+	return count;
 }
 
@@ -952,7 +952,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
 	tport->lport = nport->lport;
 	nport->tport = tport;
 
-	return ret ? ret : count;
+	return count;
 }
 
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 6254cee8f8f3..5db5d1340d69 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -328,6 +328,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = bdev_file_inode(file);
 	struct block_device *bdev = I_BDEV(inode);
+	struct blk_plug plug;
 	struct blkdev_dio *dio;
 	struct bio *bio;
 	bool is_read = (iov_iter_rw(iter) == READ);
@@ -353,6 +354,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	dio->multi_bio = false;
 	dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
 
+	blk_start_plug(&plug);
 	for (;;) {
 		bio->bi_bdev = bdev;
 		bio->bi_iter.bi_sector = pos >> 9;
@@ -394,6 +396,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 		submit_bio(bio);
 		bio = bio_alloc(GFP_KERNEL, nr_pages);
 	}
+	blk_finish_plug(&plug);
 
 	if (!dio->is_sync)
 		return -EIOCBQUEUED;
diff --git a/fs/buffer.c b/fs/buffer.c
index d21771fcf7d3..0e87401cf335 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1660,7 +1660,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
 		head = page_buffers(page);
 		bh = head;
 		do {
-			if (!buffer_mapped(bh))
+			if (!buffer_mapped(bh) || (bh->b_blocknr < block))
 				goto next;
 			if (bh->b_blocknr >= block + len)
 				break;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index e0341af6950e..76f39754e7b0 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -146,15 +146,6 @@ enum {
 	DISK_EVENT_EJECT_REQUEST	= 1 << 1,	/* eject requested */
 };
 
-#define BLK_SCSI_MAX_CMDS	(256)
-#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
-struct blk_scsi_cmd_filter {
-	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
-	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
-	struct kobject kobj;
-};
-
 struct disk_part_tbl {
 	struct rcu_head rcu_head;
 	int len;
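
A note on the queue-sysfs.txt hunk above: io_poll now reports only whether polling is enabled. Below is a minimal userspace sketch for poking that attribute, assuming a namespace visible at /sys/block/nvme0n1 (the device name is an example, root is typically required, and the write can fail with EINVAL if the queue does not support polling):

/*
 * Hypothetical example: toggle and read /sys/block/nvme0n1/queue/io_poll.
 * Build: cc -o io_poll_toggle io_poll_toggle.c
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/block/nvme0n1/queue/io_poll"; /* assumed device */
	char buf[16];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Any non-zero value enables polling, '0' disables it. */
	if (write(fd, "1", 1) < 0)
		perror("write");	/* e.g. EINVAL if polling is unsupported */

	/* Per the updated documentation, a read now returns 1 or 0. */
	if (lseek(fd, 0, SEEK_SET) == 0 && (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		printf("io_poll = %s", buf);
	}

	close(fd);
	return 0;
}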
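
The blk-wbt hunk annotates __wbt_wait() with __releases()/__acquires() and keeps the unlock, sleep and relock on a single branch. The following is a userspace pthreads analogue of that control-flow shape only (build with cc -pthread); sleep() stands in for io_schedule(), and the function and variable names are invented for the example:

/* Userspace analogue of the __wbt_wait() locking shape; names invented. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Block for a while; if the caller handed us a lock, drop it across the
 * sleep and retake it on the same branch (the shape the hunk moves to). */
static void wait_for_slot(pthread_mutex_t *lock)
{
	if (lock) {
		pthread_mutex_unlock(lock);
		sleep(1);		/* stands in for io_schedule() */
		pthread_mutex_lock(lock);
	} else {
		sleep(1);
	}
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&lock);
	wait_for_slot(&lock);	/* returns with the lock held again */
	pthread_mutex_unlock(&lock);

	wait_for_slot(NULL);	/* caller had no lock to juggle */
	puts("done");
	return 0;
}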
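
The nvme-fc hunk replaces the nested clean-up loop with a single delete_queues: label that unwinds from the failing index downwards. Here is a self-contained userspace sketch of that unwind shape; create_one()/delete_one() and NR_QUEUES are invented stand-ins, not the driver's helpers, and it only mirrors the control flow of the hunk:

/* Userspace sketch of the goto-unwind shape; names are invented. */
#include <stdio.h>
#include <stdlib.h>

#define NR_QUEUES 4

static void *queues[NR_QUEUES];

static int create_one(int i)
{
	queues[i] = malloc(64);
	return queues[i] ? 0 : -1;	/* -1 stands in for an errno value */
}

static void delete_one(int i)
{
	free(queues[i]);		/* free(NULL) is a no-op */
	queues[i] = NULL;
}

static int create_all(void)
{
	int i, ret;

	for (i = 0; i < NR_QUEUES; i++) {
		ret = create_one(i);
		if (ret)
			goto delete_queues;
	}
	return 0;

delete_queues:
	/* Tear down index i (the one that failed) and everything before it. */
	for (; i >= 0; i--)
		delete_one(i);
	return ret;
}

int main(void)
{
	printf("create_all: %d\n", create_all());
	return 0;
}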
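
The fs/block_dev.c hunk brackets the bio submission loop in __blkdev_direct_IO() with blk_start_plug()/blk_finish_plug(). The fragment below is a kernel-style sketch of that general pattern, not buildable on its own and not part of the patch; submit_batched() is an invented example function:

/* Kernel-style sketch only; submit_batched() is an invented helper. */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void submit_batched(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* start batching on this task */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);		/* flush the batch to the driver */
}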
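
Finally, the fs/buffer.c one-liner makes clean_bdev_aliases() skip buffers whose block number lies below the start of the range as well as those past its end, i.e. it only touches blocks in [block, block + len). A tiny standalone restatement of that predicate, using a local sector_t typedef for illustration:

/* Standalone restatement of the [block, block + len) check; illustrative only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static bool bh_in_range(sector_t blocknr, sector_t block, sector_t len)
{
	return blocknr >= block && blocknr < block + len;
}

int main(void)
{
	/* Expected output: 0 1 0 (below the range, inside it, past the end). */
	printf("%d %d %d\n", bh_in_range(9, 10, 4), bh_in_range(10, 10, 4),
	       bh_in_range(14, 10, 4));
	return 0;
}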