/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>

#include "nvmet.h"
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures write lock should be obtained,
 * while when reading (populating discovery log page or checking host-subsystem
 * link) read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}
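/*
 * Returns the highest NSID currently in use.  This relies on the
 * subsystem's namespaces list being kept sorted by NSID (see
 * nvmet_ns_enable() below); an empty list yields 0.
 */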
static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *ns;

	if (list_empty(&subsys->namespaces))
		return 0;

	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
	return ns->nsid;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}
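/*
 * Drain queued async events: each pass pairs one queued AEN with one
 * outstanding Asynchronous Event Request command and completes it,
 * stopping as soon as either the AEN list or the AER commands run dry.
 */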
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}
static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}
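/*
 * Transport drivers (e.g. loop, RDMA, FC) call this to claim the
 * nvmet_transports[] slot for their transport type, typically from
 * their module init; a second registration for the same type fails
 * with -EINVAL.
 */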
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
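/*
 * Note the unlock/relock dance around request_module() below: the
 * config semaphore must not be held while the transport module loads
 * and calls back into nvmet_register_transport(), so we drop it, try
 * the autoload, and re-check the transports table afterwards.
 */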
int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}
void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}
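/*
 * Namespace lookup.  The caller must hold rcu_read_lock(); the returned
 * namespace is only guaranteed to stay alive for the duration of the
 * RCU critical section unless a percpu reference is taken, which is
 * what nvmet_find_namespace() below does.
 */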
static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}
static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}
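/*
 * Enabling a namespace tries the block-device backend first and falls
 * back to the file backend when the path does not resolve to a block
 * device (-ENOTBLK).  Registered controllers are notified via an AEN.
 */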
int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;
	if (ns->enabled)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the implementation
	 * of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_dev_put:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}
void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}
void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	return ns;
}
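/*
 * Completion path.  The submission queue head pointer is advanced with
 * a lockless cmpxchg() loop so that concurrent completions never hand
 * a stale SQHD value to the transport's ->queue_response().
 */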
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	u32 old_sqhd, new_sqhd;
	u16 sqhd;

	if (status)
		nvmet_set_status(req, status);

	if (req->sq->size) {
		do {
			old_sqhd = req->sq->sqhd;
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
				old_sqhd);
	}
	sqhd = req->sq->sqhd & 0x0000FFFF;
	req->rsp->sq_head = cpu_to_le16(sqhd);
	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}
static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
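/*
 * I/O command parsing: validate controller state, resolve the target
 * namespace, then hand off to the backend that was attached when the
 * namespace was enabled (file or block device).
 */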
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns))
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	if (req->ns->file)
		return nvmet_file_parse_io_cmd(req);
	else
		return nvmet_bdev_parse_io_cmd(req);
}
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->transfer_len = 0;
	req->rsp->status = 0;
	req->ns = NULL;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any Non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);
void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
void nvmet_req_execute(struct nvmet_req *req)
{
	if (unlikely(req->data_len != req->transfer_len))
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
	else
		req->execute(req);
}
EXPORT_SYMBOL_GPL(nvmet_req_execute);
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}
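/*
 * Starting the controller validates the host-programmed CC fields:
 * the I/O queue entry sizes must match the NVMe defaults, and MPS,
 * AMS, and CSS must all be zero (4k pages, round-robin arbitration,
 * NVM command set).  Anything else sets CSTS.CFS instead of CSTS.RDY.
 */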
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}
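/*
 * CAP register layout used below (per the NVMe base spec): bit 37
 * selects the NVM command set in CAP.CSS, bits 31:24 are CAP.TO
 * (CC.EN timeout in 500ms units), and bits 15:0 are CAP.MQES, which
 * is 0's based, hence the "- 1".
 */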
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
			cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
			cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}
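/*
 * Host access control.  A subsystem either allows any host or carries
 * an explicit allowed-hosts list; for a discovery subsystem a host is
 * admitted if any subsystem on the port would admit it.  All of this
 * runs under nvmet_config_sem.
 */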
static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}
static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}
bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status;
	int ret;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_remove_ida;
		}

		/*
		 * Discovery controllers use some arbitrary high value in order
		 * to cleanup stale discovery sessions
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_remove_ida:
	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}
static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}
static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
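/*
 * Connections to the well-known discovery NQN bypass the per-port
 * subsystem list and resolve directly to the global discovery
 * subsystem; everything else must be linked to the port.
 */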
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}
static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}
void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}
static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}
static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");