author    Linus Torvalds <torvalds@linux-foundation.org>    2021-02-21 11:06:54 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-02-21 11:06:54 -0800
commit    9820b4dca0f9c6b7ab8b4307286cdace171b724d (patch)
tree      9feef543deb60557f301ceeed8f7c56ed3efedad /drivers/nvme/target
parent    582cd91f69de8e44857cb610ebca661dac8656b7 (diff)
parent    f4b64ae6745177642cd9610cfd7df0041e7fca58 (diff)
Merge tag 'for-5.12/drivers-2021-02-17' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:

 - Remove the skd driver. It's been EOL for a long time (Damien)

 - NVMe pull requests:
     - fix multipath handling of ->queue_rq errors (Chao Leng)
     - nvmet cleanups (Chaitanya Kulkarni)
     - add a quirk for buggy Amazon controller (Filippo Sironi)
     - avoid devm allocations in nvme-hwmon that don't interact well
       with fabrics (Hannes Reinecke)
     - sysfs cleanups (Jiapeng Chong)
     - fix nr_zones for multipath (Keith Busch)
     - nvme-tcp crash fix for no-data commands (Sagi Grimberg)
     - nvmet-tcp fixes (Sagi Grimberg)
     - add a missing __rcu annotation (Christoph)
     - failed reconnect fixes (Chao Leng)
     - various tracing improvements (Michal Krakowiak, Johannes
       Thumshirn)
     - switch the nvmet-fc assoc_list to use RCU protection (Leonid
       Ravich)
     - resync the status codes with the latest spec (Max Gurtovoy)
     - minor nvme-tcp improvements (Sagi Grimberg)
     - various cleanups (Rikard Falkeborn, Minwoo Im, Chaitanya
       Kulkarni, Israel Rukshin)

 - Floppy O_NDELAY fix (Denis)

 - MD pull request:
     - raid5 chunk_sectors fix (Guoqing)

 - Use lore links (Kees)

 - Use DEFINE_SHOW_ATTRIBUTE for nbd (Liao)

 - loop lock scaling (Pavel)

 - mtip32xx PCI fixes (Bjorn)

 - bcache fixes (Kai, Dongdong)

 - Misc fixes (Tian, Yang, Guoqing, Joe, Andy)

* tag 'for-5.12/drivers-2021-02-17' of git://git.kernel.dk/linux-block: (64 commits)
  lightnvm: pblk: Replace guid_copy() with export_guid()/import_guid()
  lightnvm: fix unnecessary NULL check warnings
  nvme-tcp: fix crash triggered with a dataless request submission
  block: Replace lkml.org links with lore
  nbd: Convert to DEFINE_SHOW_ATTRIBUTE
  nvme: add 48-bit DMA address quirk for Amazon NVMe controllers
  nvme-hwmon: rework to avoid devm allocation
  nvmet: remove else at the end of the function
  nvmet: add nvmet_req_subsys() helper
  nvmet: use min of device_path and disk len
  nvmet: use invalid cmd opcode helper
  nvmet: use invalid cmd opcode helper
  nvmet: add helper to report invalid opcode
  nvmet: remove extra variable in id-ns handler
  nvmet: make nvmet_find_namespace() req based
  nvmet: return uniform error for invalid ns
  nvmet: set status to 0 in case for invalid nsid
  nvmet-fc: add a missing __rcu annotation to nvmet_fc_tgt_assoc.queues
  nvme-multipath: set nr_zones for zoned namespaces
  nvmet-tcp: fix potential race of tcp socket closing accept_work
  ...
Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/admin-cmd.c    114
-rw-r--r--  drivers/nvme/target/configfs.c       6
-rw-r--r--  drivers/nvme/target/core.c          37
-rw-r--r--  drivers/nvme/target/fc.c            83
-rw-r--r--  drivers/nvme/target/fcloop.c         2
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c   13
-rw-r--r--  drivers/nvme/target/io-cmd-file.c    5
-rw-r--r--  drivers/nvme/target/nvmet.h         20
-rw-r--r--  drivers/nvme/target/passthru.c       6
-rw-r--r--  drivers/nvme/target/tcp.c           59
-rw-r--r--  drivers/nvme/target/trace.h          9
11 files changed, 189 insertions, 165 deletions
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index dc1ea468b182..bc6a774f2124 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -74,34 +74,28 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
- struct nvmet_ns *ns;
u64 host_reads, host_writes, data_units_read, data_units_written;
+ u16 status;
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
- if (!ns) {
- pr_err("Could not find namespace id : %d\n",
- le32_to_cpu(req->cmd->get_log_page.nsid));
- req->error_loc = offsetof(struct nvme_rw_command, nsid);
- return NVME_SC_INVALID_NS;
- }
+ status = nvmet_req_find_ns(req);
+ if (status)
+ return status;
/* we don't have the right data for file backed ns */
- if (!ns->bdev)
- goto out;
+ if (!req->ns->bdev)
+ return NVME_SC_SUCCESS;
- host_reads = part_stat_read(ns->bdev, ios[READ]);
+ host_reads = part_stat_read(req->ns->bdev, ios[READ]);
data_units_read =
- DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000);
- host_writes = part_stat_read(ns->bdev, ios[WRITE]);
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
+ host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
data_units_written =
- DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000);
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
put_unaligned_le64(host_reads, &slog->host_reads[0]);
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
put_unaligned_le64(host_writes, &slog->host_writes[0]);
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
-out:
- nvmet_put_namespace(ns);
return NVME_SC_SUCCESS;
}
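The arithmetic in this hunk follows the NVMe SMART log convention: data units are reported in thousands of 512-byte sectors, rounded up, which is exactly what DIV_ROUND_UP(sectors, 1000) computes. A minimal standalone sketch of that computation (illustrative userspace C, not part of the patch):

/*
 * Illustration only: the SMART "data units" figure is thousands of
 * 512-byte sectors, rounded up.
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        uint64_t sectors_read = 123456; /* hypothetical per-bdev counter */
        uint64_t data_units_read = DIV_ROUND_UP(sectors_read, 1000);

        /* 123456 sectors -> 124 data units (123.456 rounded up) */
        printf("%llu sectors -> %llu data units\n",
               (unsigned long long)sectors_read,
               (unsigned long long)data_units_read);
        return 0;
}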
@@ -468,10 +462,8 @@ out:
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
- struct nvmet_ctrl *ctrl = req->sq->ctrl;
- struct nvmet_ns *ns;
struct nvme_id_ns *id;
- u16 status = 0;
+ u16 status;
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -486,20 +478,21 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
}
/* return an all zeroed buffer if we can't find an active namespace */
- ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
- if (!ns) {
- status = NVME_SC_INVALID_NS;
+ status = nvmet_req_find_ns(req);
+ if (status) {
+ status = 0;
goto done;
}
- nvmet_ns_revalidate(ns);
+ nvmet_ns_revalidate(req->ns);
/*
* nuse = ncap = nsze isn't always true, but we have no way to find
* that out from the underlying device.
*/
- id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
- switch (req->port->ana_state[ns->anagrpid]) {
+ id->ncap = id->nsze =
+ cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
+ switch (req->port->ana_state[req->ns->anagrpid]) {
case NVME_ANA_INACCESSIBLE:
case NVME_ANA_PERSISTENT_LOSS:
break;
@@ -508,8 +501,8 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
break;
}
- if (ns->bdev)
- nvmet_bdev_set_limits(ns->bdev, id);
+ if (req->ns->bdev)
+ nvmet_bdev_set_limits(req->ns->bdev, id);
/*
* We just provide a single LBA format that matches what the
@@ -523,25 +516,24 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* controllers, but also with any other user of the block device.
*/
id->nmic = (1 << 0);
- id->anagrpid = cpu_to_le32(ns->anagrpid);
+ id->anagrpid = cpu_to_le32(req->ns->anagrpid);
- memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
+ memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
- id->lbaf[0].ds = ns->blksize_shift;
+ id->lbaf[0].ds = req->ns->blksize_shift;
- if (ctrl->pi_support && nvmet_ns_has_pi(ns)) {
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
NVME_NS_DPC_PI_TYPE3;
id->mc = NVME_MC_EXTENDED_LBA;
- id->dps = ns->pi_type;
+ id->dps = req->ns->pi_type;
id->flbas = NVME_NS_FLBAS_META_EXT;
- id->lbaf[0].ms = cpu_to_le16(ns->metadata_size);
+ id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
}
- if (ns->readonly)
+ if (req->ns->readonly)
id->nsattr |= (1 << 0);
- nvmet_put_namespace(ns);
done:
if (!status)
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -607,37 +599,32 @@ static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
- struct nvmet_ns *ns;
- u16 status = 0;
off_t off = 0;
+ u16 status;
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
- if (!ns) {
- req->error_loc = offsetof(struct nvme_identify, nsid);
- status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ status = nvmet_req_find_ns(req);
+ if (status)
goto out;
- }
- if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
+ if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
NVME_NIDT_UUID_LEN,
- &ns->uuid, &off);
+ &req->ns->uuid, &off);
if (status)
- goto out_put_ns;
+ goto out;
}
- if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
+ if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
NVME_NIDT_NGUID_LEN,
- &ns->nguid, &off);
+ &req->ns->nguid, &off);
if (status)
- goto out_put_ns;
+ goto out;
}
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
off) != NVME_IDENTIFY_DATA_SIZE - off)
status = NVME_SC_INTERNAL | NVME_SC_DNR;
-out_put_ns:
- nvmet_put_namespace(ns);
+
out:
nvmet_req_complete(req, status);
}
@@ -696,14 +683,12 @@ static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
- u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
+ u16 status;
- req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
- if (unlikely(!req->ns)) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
+ status = nvmet_req_find_ns(req);
+ if (status)
return status;
- }
mutex_lock(&subsys->lock);
switch (write_protect) {
@@ -757,7 +742,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
void nvmet_execute_set_features(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
u16 status = 0;
@@ -801,14 +786,13 @@ void nvmet_execute_set_features(struct nvmet_req *req)
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 result;
- req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
- if (!req->ns) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
- return NVME_SC_INVALID_NS | NVME_SC_DNR;
- }
+ result = nvmet_req_find_ns(req);
+ if (result)
+ return result;
+
mutex_lock(&subsys->lock);
if (req->ns->readonly == true)
result = NVME_NS_WRITE_PROTECT;
@@ -832,7 +816,7 @@ void nvmet_get_feat_async_event(struct nvmet_req *req)
void nvmet_execute_get_features(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
@@ -939,7 +923,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_cmd(req);
- if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+ if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
return nvmet_parse_discovery_cmd(req);
ret = nvmet_check_ctrl_status(req, cmd);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index c61ffd767062..635a7cb45d0b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -45,7 +45,7 @@ static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
{
if (p->enabled)
pr_err("Disable port '%u' before changing attribute in %s\n",
- le16_to_cpu(p->disc_addr.portid), caller);
+ le16_to_cpu(p->disc_addr.portid), caller);
return p->enabled;
}
@@ -266,10 +266,8 @@ static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
if (strtobool(page, &val))
return -EINVAL;
- if (port->enabled) {
- pr_err("Disable port before setting pi_enable value.\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
port->pi_enable = val;
return count;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8ce4d59cc9e7..67bbf0e3b507 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -82,6 +82,15 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
return status;
}
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
+{
+ pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
+ req->sq->qid);
+
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+}
+
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
const char *subsysnqn);
@@ -417,15 +426,18 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
cancel_delayed_work_sync(&ctrl->ka_work);
}
-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
+u16 nvmet_req_find_ns(struct nvmet_req *req)
{
- struct nvmet_ns *ns;
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
- ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
- if (ns)
- percpu_ref_get(&ns->ref);
+ req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+ if (unlikely(!req->ns)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
+ return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ }
- return ns;
+ percpu_ref_get(&req->ns->ref);
+ return NVME_SC_SUCCESS;
}
static void nvmet_destroy_namespace(struct percpu_ref *ref)
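This hunk carries the central refactor of the series: nvmet_find_namespace(), which returned a pointer every caller had to NULL-check and translate into a status code, becomes nvmet_req_find_ns(), which stashes the hit in req->ns and returns an NVMe status directly. A hedged userspace sketch of that API shape, with toy types and hypothetical names (a linear scan stands in for the kernel's xarray lookup, and reference counting is omitted):

/* Sketch of the refactor: fold "look up, NULL-check, set error_loc,
 * pick a status" into one helper that fills the request and returns
 * a status. Names here are illustrative, not the kernel's. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SC_SUCCESS    0x0
#define SC_INVALID_NS 0xb

struct ns { uint32_t nsid; };
struct req {
        uint32_t nsid;    /* from the command */
        struct ns *ns;    /* filled in on success */
        size_t error_loc; /* offset of the offending field */
};

static struct ns table[] = { { 1 }, { 2 } };

static uint16_t req_find_ns(struct req *req)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                if (table[i].nsid == req->nsid) {
                        req->ns = &table[i];
                        return SC_SUCCESS;
                }
        }
        req->error_loc = offsetof(struct req, nsid);
        return SC_INVALID_NS;
}

int main(void)
{
        struct req req = { .nsid = 3 };

        /* callers now just propagate the status, as in the hunk above */
        printf("status=0x%x\n", req_find_ns(&req));
        return 0;
}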
@@ -862,11 +874,10 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
if (nvmet_req_passthru_ctrl(req))
return nvmet_parse_passthru_io_cmd(req);
- req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
- if (unlikely(!req->ns)) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
- return NVME_SC_INVALID_NS | NVME_SC_DNR;
- }
+ ret = nvmet_req_find_ns(req);
+ if (unlikely(ret))
+ return ret;
+
ret = nvmet_check_ana_state(req->port, req->ns);
if (unlikely(ret)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
@@ -880,8 +891,8 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
if (req->ns->file)
return nvmet_file_parse_io_cmd(req);
- else
- return nvmet_bdev_parse_io_cmd(req);
+
+ return nvmet_bdev_parse_io_cmd(req);
}
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index cd4e73aa9807..d375745fc4ed 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -145,6 +145,7 @@ struct nvmet_fc_tgt_queue {
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
+ struct rcu_head rcu;
struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
} __aligned(sizeof(unsigned long long));
@@ -164,9 +165,10 @@ struct nvmet_fc_tgt_assoc {
struct nvmet_fc_hostport *hostport;
struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
- struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
+ struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
+ struct rcu_head rcu;
};
@@ -790,7 +792,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
u16 qid, u16 sqsize)
{
struct nvmet_fc_tgt_queue *queue;
- unsigned long flags;
int ret;
if (qid > NVMET_NR_QUEUES)
@@ -829,9 +830,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
goto out_fail_iodlist;
WARN_ON(assoc->queues[qid]);
- spin_lock_irqsave(&assoc->tgtport->lock, flags);
- assoc->queues[qid] = queue;
- spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
+ rcu_assign_pointer(assoc->queues[qid], queue);
return queue;
@@ -851,11 +850,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
{
struct nvmet_fc_tgt_queue *queue =
container_of(ref, struct nvmet_fc_tgt_queue, ref);
- unsigned long flags;
- spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
- queue->assoc->queues[queue->qid] = NULL;
- spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
+ rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
@@ -863,7 +859,7 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
destroy_workqueue(queue->work_q);
- kfree(queue);
+ kfree_rcu(queue, rcu);
}
static void
@@ -965,24 +961,23 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_tgt_queue *queue;
u64 association_id = nvmet_fc_getassociationid(connection_id);
u16 qid = nvmet_fc_getqueueid(connection_id);
- unsigned long flags;
if (qid > NVMET_NR_QUEUES)
return NULL;
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
- queue = assoc->queues[qid];
+ queue = rcu_dereference(assoc->queues[qid]);
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
queue = NULL;
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
return queue;
}
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
return NULL;
}
@@ -1137,7 +1132,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
}
if (!needrandom) {
assoc->association_id = ran;
- list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+ list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
}
@@ -1167,7 +1162,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
- list_del(&assoc->a_list);
+ list_del_rcu(&assoc->a_list);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
/* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1177,7 +1172,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
dev_info(tgtport->dev,
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
- kfree(assoc);
+ kfree_rcu(assoc, rcu);
nvmet_fc_tgtport_put(tgtport);
}
@@ -1198,7 +1193,6 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_tgt_queue *queue;
- unsigned long flags;
int i, terminating;
terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1207,19 +1201,23 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
if (terminating)
return;
- spin_lock_irqsave(&tgtport->lock, flags);
+
for (i = NVMET_NR_QUEUES; i >= 0; i--) {
- queue = assoc->queues[i];
- if (queue) {
- if (!nvmet_fc_tgt_q_get(queue))
- continue;
- spin_unlock_irqrestore(&tgtport->lock, flags);
- nvmet_fc_delete_target_queue(queue);
- nvmet_fc_tgt_q_put(queue);
- spin_lock_irqsave(&tgtport->lock, flags);
+ rcu_read_lock();
+ queue = rcu_dereference(assoc->queues[i]);
+ if (!queue) {
+ rcu_read_unlock();
+ continue;
+ }
+
+ if (!nvmet_fc_tgt_q_get(queue)) {
+ rcu_read_unlock();
+ continue;
}
+ rcu_read_unlock();
+ nvmet_fc_delete_target_queue(queue);
+ nvmet_fc_tgt_q_put(queue);
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
dev_info(tgtport->dev,
"{%d:%d} Association deleted\n",
@@ -1234,10 +1232,9 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
{
struct nvmet_fc_tgt_assoc *assoc;
struct nvmet_fc_tgt_assoc *ret = NULL;
- unsigned long flags;
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
ret = assoc;
if (!nvmet_fc_tgt_a_get(assoc))
@@ -1245,7 +1242,7 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
break;
}
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
return ret;
}
@@ -1473,19 +1470,17 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
- struct nvmet_fc_tgt_assoc *assoc, *next;
- unsigned long flags;
+ struct nvmet_fc_tgt_assoc *assoc;
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry_safe(assoc, next,
- &tgtport->assoc_list, a_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
if (!schedule_work(&assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
}
/**
@@ -1568,16 +1563,16 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
continue;
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
- queue = assoc->queues[0];
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ queue = rcu_dereference(assoc->queues[0]);
if (queue && queue->nvme_sq.ctrl == ctrl) {
if (nvmet_fc_tgt_a_get(assoc))
found_ctrl = true;
break;
}
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
nvmet_fc_tgtport_put(tgtport);
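The fc.c changes above move readers of tgtport->assoc_list and assoc->queues[] from the tgtport spinlock to RCU: writers publish with list_add_tail_rcu()/rcu_assign_pointer() and defer frees with kfree_rcu(), while readers walk the structures under rcu_read_lock(). The publication half of that contract amounts to a release store paired with an acquire (consume) load; the following userspace analogy with C11 atomics models only the pointer publication, not grace periods or deferred reclamation:

/* Analogy, not kernel code: rcu_assign_pointer() is essentially a
 * release store and rcu_dereference() an acquire/consume load, so a
 * reader that observes the new pointer also observes the initialized
 * object behind it. */
#include <stdatomic.h>
#include <stdio.h>

struct queue { int qid; int connected; };

static _Atomic(struct queue *) slot; /* like assoc->queues[qid] */

static void publish(struct queue *q)
{
        q->connected = 1; /* initialize fully... */
        atomic_store_explicit(&slot, q, memory_order_release); /* ...then publish */
}

static struct queue *lookup(void)
{
        /* the acquire pairs with the release above */
        return atomic_load_explicit(&slot, memory_order_acquire);
}

int main(void)
{
        static struct queue q = { .qid = 1 };
        struct queue *seen;

        publish(&q);
        seen = lookup();
        if (seen)
                printf("qid=%d connected=%d\n", seen->qid, seen->connected);
        return 0;
}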
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 68213f0a052b..54606f1872b4 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1545,7 +1545,7 @@ static struct attribute *fcloop_dev_attrs[] = {
NULL
};
-static struct attribute_group fclopp_dev_attrs_group = {
+static const struct attribute_group fclopp_dev_attrs_group = {
.attrs = fcloop_dev_attrs,
};
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index bf6e0ac9ad28..3d9a5d3ed9cd 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -256,8 +256,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
if (is_pci_p2pdma_page(sg_page(req->sg)))
op |= REQ_NOMERGE;
- sector = le64_to_cpu(req->cmd->rw.slba);
- sector <<= (req->ns->blksize_shift - 9);
+ sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
bio = &req->b.inline_bio;
@@ -345,7 +344,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
int ret;
ret = __blkdev_issue_discard(ns->bdev,
- le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+ nvmet_lba_to_sect(ns, range->slba),
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
GFP_KERNEL, 0, bio);
if (ret && ret != -EOPNOTSUPP) {
@@ -414,8 +413,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
- sector = le64_to_cpu(write_zeroes->slba) <<
- (req->ns->blksize_shift - 9);
+ sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
(req->ns->blksize_shift - 9));
@@ -451,9 +449,6 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
req->execute = nvmet_bdev_execute_write_zeroes;
return 0;
default:
- pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
- req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
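Both I/O command parsers (bdev above, file in the next diff) now funnel their default: arms through nvmet_report_invalid_opcode(), so the log message, error_loc assignment, and status value live in one place. A trivial sketch of that shape, with made-up opcodes and names:

/* Sketch of the cleanup: one helper replaces copy-pasted default: arms. */
#include <stdint.h>
#include <stdio.h>

#define SC_INVALID_OPCODE 0x01
#define SC_DNR            0x4000

static uint16_t report_invalid_opcode(uint8_t opcode, int qid)
{
        fprintf(stderr, "unhandled cmd %d on qid %d\n", opcode, qid);
        return SC_INVALID_OPCODE | SC_DNR;
}

static uint16_t parse_io_cmd(uint8_t opcode, int qid)
{
        switch (opcode) {
        case 0x01: /* write */
        case 0x02: /* read */
                return 0;
        default:
                return report_invalid_opcode(opcode, qid); /* was open-coded */
        }
}

int main(void)
{
        printf("0x%x\n", parse_io_cmd(0x7f, 1));
        return 0;
}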
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 0abbefd9925e..715d4376c997 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -400,9 +400,6 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
req->execute = nvmet_file_execute_write_zeroes;
return 0;
default:
- pr_err("unhandled cmd for file ns %d on qid %d\n",
- cmd->common.opcode, req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 592763732065..cdfa537b1c0a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -443,7 +443,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
+u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
@@ -551,6 +551,11 @@ static inline u32 nvmet_dsm_len(struct nvmet_req *req)
sizeof(struct nvme_dsm_range);
}
+static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
+{
+ return req->sq->ctrl->subsys;
+}
+
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
@@ -585,10 +590,11 @@ static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
static inline struct nvme_ctrl *
nvmet_req_passthru_ctrl(struct nvmet_req *req)
{
- return nvmet_passthru_ctrl(req->sq->ctrl->subsys);
+ return nvmet_passthru_ctrl(nvmet_req_subsys(req));
}
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
@@ -603,4 +609,14 @@ static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}
+static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
+{
+ return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
+}
+
+static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
+{
+ return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
+}
+
#endif /* _NVMET_H */
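The new nvmet_sect_to_lba()/nvmet_lba_to_sect() helpers replace the open-coded "<< (blksize_shift - 9)" shifts in io-cmd-bdev.c. A kernel sector is 512 bytes (SECTOR_SHIFT == 9), so a namespace with 4096-byte logical blocks converts by shifting 3 bits. A standalone check of the arithmetic, with the endianness conversions dropped for clarity:

/* Illustration of the helper arithmetic: 4096-byte blocks mean
 * blksize_shift = 12, so LBA <-> sector is a shift by 3. */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

static uint64_t lba_to_sect(unsigned blksize_shift, uint64_t lba)
{
        return lba << (blksize_shift - SECTOR_SHIFT);
}

static uint64_t sect_to_lba(unsigned blksize_shift, uint64_t sect)
{
        return sect >> (blksize_shift - SECTOR_SHIFT);
}

int main(void)
{
        unsigned shift = 12; /* 4096-byte logical blocks */
        uint64_t lba = 100;
        uint64_t sect = lba_to_sect(shift, lba); /* 100 << 3 == 800 */

        printf("lba %llu -> sector %llu -> lba %llu\n",
               (unsigned long long)lba,
               (unsigned long long)sect,
               (unsigned long long)sect_to_lba(shift, sect));
        return 0;
}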
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index cbc88acdd233..f50c7b2bf21c 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -239,9 +239,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
}
q = ns->queue;
- timeout = req->sq->ctrl->subsys->io_timeout;
+ timeout = nvmet_req_subsys(req)->io_timeout;
} else {
- timeout = req->sq->ctrl->subsys->admin_timeout;
+ timeout = nvmet_req_subsys(req)->admin_timeout;
}
rq = nvme_alloc_request(q, req->cmd, 0);
@@ -494,7 +494,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
return nvmet_setup_passthru_command(req);
default:
/* Reject commands not in the allowlist above */
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index aacf06f0b431..8b0485ada315 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -379,7 +379,7 @@ err:
return NVME_SC_INTERNAL;
}
-static void nvmet_tcp_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
struct nvmet_tcp_cmd *cmd)
{
ahash_request_set_crypt(hash, cmd->req.sg,
@@ -387,6 +387,23 @@ static void nvmet_tcp_ddgst(struct ahash_request *hash,
crypto_ahash_digest(hash);
}
+static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
+ struct nvmet_tcp_cmd *cmd)
+{
+ struct scatterlist sg;
+ struct kvec *iov;
+ int i;
+
+ crypto_ahash_init(hash);
+ for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
+ sg_init_one(&sg, iov->iov_base, iov->iov_len);
+ ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
+ crypto_ahash_update(hash);
+ }
+ ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
+ crypto_ahash_final(hash);
+}
+
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
@@ -411,7 +428,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
if (queue->data_digest) {
pdu->hdr.flags |= NVME_TCP_F_DDGST;
- nvmet_tcp_ddgst(queue->snd_hash, cmd);
+ nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
}
if (cmd->queue->hdr_digest) {
@@ -1060,7 +1077,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
struct nvmet_tcp_queue *queue = cmd->queue;
- nvmet_tcp_ddgst(queue->rcv_hash, cmd);
+ nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
queue->offset = 0;
queue->left = NVME_TCP_DIGEST_LENGTH;
queue->rcv_state = NVMET_TCP_RECV_DDGST;
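nvmet_tcp_recv_ddgst() above cannot reuse the send-side digest over the request scatterlist: received data sits in the command's kvec array, so each segment is folded into the hash with an init/update/final sequence. The same incremental pattern in userspace, with a toy FNV-1a hash standing in for crypto_ahash (hashing segment by segment must equal hashing the concatenated buffer):

/* Userspace analogue of the init/update/final flow above, FNV-1a in
 * place of crypto_ahash. */
#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

static void fnv1a_update(uint64_t *h, const void *buf, size_t len)
{
        const unsigned char *p = buf;

        while (len--) {
                *h ^= *p++;
                *h *= 0x100000001b3ULL;
        }
}

int main(void)
{
        struct iovec iov[2] = {
                { .iov_base = "nvme", .iov_len = 4 },
                { .iov_base = "-tcp", .iov_len = 4 },
        };
        uint64_t seg = 0xcbf29ce484222325ULL;   /* "init" */
        uint64_t whole = 0xcbf29ce484222325ULL;

        for (int i = 0; i < 2; i++)             /* per-iovec "update" */
                fnv1a_update(&seg, iov[i].iov_base, iov[i].iov_len);
        fnv1a_update(&whole, "nvme-tcp", 8);

        printf("segmented=%016llx whole=%016llx match=%d\n", /* "final" */
               (unsigned long long)seg, (unsigned long long)whole,
               seg == whole);
        return 0;
}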
@@ -1081,14 +1098,14 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
cmd->rbytes_done += ret;
}
+ if (queue->data_digest) {
+ nvmet_tcp_prep_recv_ddgst(cmd);
+ return 0;
+ }
nvmet_tcp_unmap_pdu_iovec(cmd);
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
cmd->rbytes_done == cmd->req.transfer_len) {
- if (queue->data_digest) {
- nvmet_tcp_prep_recv_ddgst(cmd);
- return 0;
- }
cmd->req.execute(&cmd->req);
}
@@ -1468,17 +1485,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
if (inet->rcv_tos > 0)
ip_sock_set_tos(sock->sk, inet->rcv_tos);
+ ret = 0;
write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_user_data = queue;
- queue->data_ready = sock->sk->sk_data_ready;
- sock->sk->sk_data_ready = nvmet_tcp_data_ready;
- queue->state_change = sock->sk->sk_state_change;
- sock->sk->sk_state_change = nvmet_tcp_state_change;
- queue->write_space = sock->sk->sk_write_space;
- sock->sk->sk_write_space = nvmet_tcp_write_space;
+ if (sock->sk->sk_state != TCP_ESTABLISHED) {
+ /*
+ * If the socket is already closing, don't even start
+ * consuming it
+ */
+ ret = -ENOTCONN;
+ } else {
+ sock->sk->sk_user_data = queue;
+ queue->data_ready = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+ queue->state_change = sock->sk->sk_state_change;
+ sock->sk->sk_state_change = nvmet_tcp_state_change;
+ queue->write_space = sock->sk->sk_write_space;
+ sock->sk->sk_write_space = nvmet_tcp_write_space;
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+ }
write_unlock_bh(&sock->sk->sk_callback_lock);
- return 0;
+ return ret;
}
static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@@ -1526,8 +1553,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (ret)
goto out_destroy_sq;
- queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
-
return 0;
out_destroy_sq:
mutex_lock(&nvmet_tcp_queue_mutex);
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index c14e3249a14d..6109b3806b12 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -48,10 +48,13 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
static inline void __assign_req_name(char *name, struct nvmet_req *req)
{
- if (req->ns)
- strncpy(name, req->ns->device_path, DISK_NAME_LEN);
- else
+ if (!req->ns) {
memset(name, 0, DISK_NAME_LEN);
+ return;
+ }
+
+ strncpy(name, req->ns->device_path,
+ min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
}
#endif