linux.git: drivers/nvme/target/core.c (blob at commit "nvmet: add a new nvmet_zero_sgl helper")
1 /*
2  * Common code for the NVMe target.
3  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/rculist.h>
18
19 #include "nvmet.h"
20
21 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
22 static DEFINE_IDA(cntlid_ida);
23
24 /*
25  * This read/write semaphore is used to synchronize access to configuration
26  * information on a target system that will result in discovery log page
27  * information change for at least one host.
28  * The full list of resources protected by this semaphore is:
29  *
30  *  - subsystems list
31  *  - per-subsystem allowed hosts list
32  *  - allow_any_host subsystem attribute
33  *  - nvmet_genctr
34  *  - the nvmet_transports array
35  *
36  * When updating any of those lists/structures the write lock must be
37  * held, while readers (populating the discovery log page or checking
38  * host-subsystem links) take the read lock to allow concurrent reads.
39  */
40 DECLARE_RWSEM(nvmet_config_sem);
41
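/*
 * Example (illustrative only, not part of the original file): readers
 * bracket any walk of the protected lists with the read side, exactly
 * as nvmet_find_get_subsys() does further down:
 *
 *	down_read(&nvmet_config_sem);
 *	...walk port->subsystems, subsys->hosts, etc...
 *	up_read(&nvmet_config_sem);
 *
 * while configuration updates such as nvmet_register_transport() take
 * the semaphore with down_write()/up_write().
 */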
42 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
43                 const char *subsysnqn);
44
45 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
46                 size_t len)
47 {
48         if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
49                 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
50         return 0;
51 }
52
53 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
54 {
55         if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
56                 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
57         return 0;
58 }
59
60 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
61 {
62         if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
63                 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
64         return 0;
65 }
66
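/*
 * Example (hypothetical handler, for illustration only): a command
 * handler typically copies its payload into the request SGL with one
 * of the helpers above and feeds the resulting status straight into
 * the completion:
 *
 *	static void nvmet_execute_example(struct nvmet_req *req)
 *	{
 *		u8 buf[16] = { };
 *		u16 status;
 *
 *		status = nvmet_copy_to_sgl(req, 0, buf, sizeof(buf));
 *		nvmet_req_complete(req, status);
 *	}
 */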
67 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
68 {
69         struct nvmet_ns *ns;
70
71         if (list_empty(&subsys->namespaces))
72                 return 0;
73
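        /* The list is kept sorted by nsid; see nvmet_ns_enable() below. */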
74         ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
75         return ns->nsid;
76 }
77
78 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
79 {
80         return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
81 }
82
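/*
 * The dword built above is what the host sees in the Asynchronous
 * Event Request completion: event type in bits 7:0, event info in
 * bits 15:8, log page identifier in bits 23:16. A host-side decode
 * (illustrative only) is simply the mirror image:
 *
 *	u8 event_type = result & 0xff;
 *	u8 event_info = (result >> 8) & 0xff;
 *	u8 log_page   = (result >> 16) & 0xff;
 */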
83 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
84 {
85         struct nvmet_req *req;
86
87         while (1) {
88                 mutex_lock(&ctrl->lock);
89                 if (!ctrl->nr_async_event_cmds) {
90                         mutex_unlock(&ctrl->lock);
91                         return;
92                 }
93
94                 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
95                 mutex_unlock(&ctrl->lock);
96                 nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
97         }
98 }
99
100 static void nvmet_async_event_work(struct work_struct *work)
101 {
102         struct nvmet_ctrl *ctrl =
103                 container_of(work, struct nvmet_ctrl, async_event_work);
104         struct nvmet_async_event *aen;
105         struct nvmet_req *req;
106
107         while (1) {
108                 mutex_lock(&ctrl->lock);
109                 aen = list_first_entry_or_null(&ctrl->async_events,
110                                 struct nvmet_async_event, entry);
111                 if (!aen || !ctrl->nr_async_event_cmds) {
112                         mutex_unlock(&ctrl->lock);
113                         return;
114                 }
115
116                 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
117                 nvmet_set_result(req, nvmet_async_event_result(aen));
118
119                 list_del(&aen->entry);
120                 kfree(aen);
121
122                 mutex_unlock(&ctrl->lock);
123                 nvmet_req_complete(req, 0);
124         }
125 }
126
127 static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
128                 u8 event_info, u8 log_page)
129 {
130         struct nvmet_async_event *aen;
131
132         aen = kmalloc(sizeof(*aen), GFP_KERNEL);
133         if (!aen)
134                 return;
135
136         aen->event_type = event_type;
137         aen->event_info = event_info;
138         aen->log_page = log_page;
139
140         mutex_lock(&ctrl->lock);
141         list_add_tail(&aen->entry, &ctrl->async_events);
142         mutex_unlock(&ctrl->lock);
143
144         schedule_work(&ctrl->async_event_work);
145 }
146
147 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
148 {
149         int ret = 0;
150
151         down_write(&nvmet_config_sem);
152         if (nvmet_transports[ops->type])
153                 ret = -EINVAL;
154         else
155                 nvmet_transports[ops->type] = ops;
156         up_write(&nvmet_config_sem);
157
158         return ret;
159 }
160 EXPORT_SYMBOL_GPL(nvmet_register_transport);
161
162 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
163 {
164         down_write(&nvmet_config_sem);
165         nvmet_transports[ops->type] = NULL;
166         up_write(&nvmet_config_sem);
167 }
168 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
169
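/*
 * Example (sketch of a hypothetical transport module; the nvmet_foo_*
 * callbacks are placeholders): a fabrics driver fills in a
 * nvmet_fabrics_ops and registers it at module load, with ->type
 * indexing the nvmet_transports[] array above:
 *
 *	static const struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_RDMA,
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.delete_ctrl	= nvmet_foo_delete_ctrl,
 *	};
 *
 * registered via nvmet_register_transport(&nvmet_foo_ops) from the
 * module init function, and unregistered again on exit.
 */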
170 int nvmet_enable_port(struct nvmet_port *port)
171 {
172         const struct nvmet_fabrics_ops *ops;
173         int ret;
174
175         lockdep_assert_held(&nvmet_config_sem);
176
177         ops = nvmet_transports[port->disc_addr.trtype];
178         if (!ops) {
179                 up_write(&nvmet_config_sem);
180                 request_module("nvmet-transport-%d", port->disc_addr.trtype);
181                 down_write(&nvmet_config_sem);
182                 ops = nvmet_transports[port->disc_addr.trtype];
183                 if (!ops) {
184                         pr_err("transport type %d not supported\n",
185                                 port->disc_addr.trtype);
186                         return -EINVAL;
187                 }
188         }
189
190         if (!try_module_get(ops->owner))
191                 return -EINVAL;
192
193         ret = ops->add_port(port);
194         if (ret) {
195                 module_put(ops->owner);
196                 return ret;
197         }
198
199         port->enabled = true;
200         return 0;
201 }
202
203 void nvmet_disable_port(struct nvmet_port *port)
204 {
205         const struct nvmet_fabrics_ops *ops;
206
207         lockdep_assert_held(&nvmet_config_sem);
208
209         port->enabled = false;
210
211         ops = nvmet_transports[port->disc_addr.trtype];
212         ops->remove_port(port);
213         module_put(ops->owner);
214 }
215
216 static void nvmet_keep_alive_timer(struct work_struct *work)
217 {
218         struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
219                         struct nvmet_ctrl, ka_work);
220
221         pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
222                 ctrl->cntlid, ctrl->kato);
223
224         nvmet_ctrl_fatal_error(ctrl);
225 }
226
227 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
228 {
229         pr_debug("ctrl %d start keep-alive timer for %d secs\n",
230                 ctrl->cntlid, ctrl->kato);
231
232         INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
233         schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
234 }
235
236 static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
237 {
238         pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
239
240         cancel_delayed_work_sync(&ctrl->ka_work);
241 }
242
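/*
 * Each Keep Alive command received from the host rearms ka_work; a
 * minimal sketch of that rearm (the actual handler lives in
 * admin-cmd.c) is:
 *
 *	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
 */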
243 static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
244                 __le32 nsid)
245 {
246         struct nvmet_ns *ns;
247
248         list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
249                 if (ns->nsid == le32_to_cpu(nsid))
250                         return ns;
251         }
252
253         return NULL;
254 }
255
256 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
257 {
258         struct nvmet_ns *ns;
259
260         rcu_read_lock();
261         ns = __nvmet_find_namespace(ctrl, nsid);
262         if (ns)
263                 percpu_ref_get(&ns->ref);
264         rcu_read_unlock();
265
266         return ns;
267 }
268
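/*
 * Callers pair the lookup with nvmet_put_namespace() once they are done
 * with the namespace, as nvmet_parse_io_cmd() and __nvmet_req_complete()
 * below do between them (illustrative pairing only):
 *
 *	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
 *	if (!req->ns)
 *		return NVME_SC_INVALID_NS | NVME_SC_DNR;
 *	...
 *	nvmet_put_namespace(req->ns);
 */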
269 static void nvmet_destroy_namespace(struct percpu_ref *ref)
270 {
271         struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
272
273         complete(&ns->disable_done);
274 }
275
276 void nvmet_put_namespace(struct nvmet_ns *ns)
277 {
278         percpu_ref_put(&ns->ref);
279 }
280
281 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
282 {
283         nvmet_bdev_ns_disable(ns);
284         nvmet_file_ns_disable(ns);
285 }
286
287 int nvmet_ns_enable(struct nvmet_ns *ns)
288 {
289         struct nvmet_subsys *subsys = ns->subsys;
290         struct nvmet_ctrl *ctrl;
291         int ret = 0;
292
293         mutex_lock(&subsys->lock);
294         if (ns->enabled)
295                 goto out_unlock;
296
297         ret = nvmet_bdev_ns_enable(ns);
298         if (ret)
299                 ret = nvmet_file_ns_enable(ns);
300         if (ret)
301                 goto out_unlock;
302
303         ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
304                                 0, GFP_KERNEL);
305         if (ret)
306                 goto out_dev_put;
307
308         if (ns->nsid > subsys->max_nsid)
309                 subsys->max_nsid = ns->nsid;
310
311         /*
312          * The namespaces list needs to be sorted to simplify the implementation
313          * of the Identify Namespace List subcommand.
314          */
315         if (list_empty(&subsys->namespaces)) {
316                 list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
317         } else {
318                 struct nvmet_ns *old;
319
320                 list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
321                         BUG_ON(ns->nsid == old->nsid);
322                         if (ns->nsid < old->nsid)
323                                 break;
324                 }
325
326                 list_add_tail_rcu(&ns->dev_link, &old->dev_link);
327         }
328
329         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
330                 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
331
332         ns->enabled = true;
333         ret = 0;
334 out_unlock:
335         mutex_unlock(&subsys->lock);
336         return ret;
337 out_dev_put:
338         nvmet_ns_dev_disable(ns);
339         goto out_unlock;
340 }
341
342 void nvmet_ns_disable(struct nvmet_ns *ns)
343 {
344         struct nvmet_subsys *subsys = ns->subsys;
345         struct nvmet_ctrl *ctrl;
346
347         mutex_lock(&subsys->lock);
348         if (!ns->enabled)
349                 goto out_unlock;
350
351         ns->enabled = false;
352         list_del_rcu(&ns->dev_link);
353         if (ns->nsid == subsys->max_nsid)
354                 subsys->max_nsid = nvmet_max_nsid(subsys);
355         mutex_unlock(&subsys->lock);
356
357         /*
358          * Now that we removed the namespace from the lookup list, we
359          * can kill the percpu ref and wait for any remaining references
360          * to be dropped, as well as an RCU grace period for anyone only
361          * using the namespace under rcu_read_lock().  Note that we can't
362          * use call_rcu here as we need to ensure the namespaces have
363          * been fully destroyed before unloading the module.
364          */
365         percpu_ref_kill(&ns->ref);
366         synchronize_rcu();
367         wait_for_completion(&ns->disable_done);
368         percpu_ref_exit(&ns->ref);
369
370         mutex_lock(&subsys->lock);
371         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
372                 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
373
374         nvmet_ns_dev_disable(ns);
375 out_unlock:
376         mutex_unlock(&subsys->lock);
377 }
378
379 void nvmet_ns_free(struct nvmet_ns *ns)
380 {
381         nvmet_ns_disable(ns);
382
383         kfree(ns->device_path);
384         kfree(ns);
385 }
386
387 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
388 {
389         struct nvmet_ns *ns;
390
391         ns = kzalloc(sizeof(*ns), GFP_KERNEL);
392         if (!ns)
393                 return NULL;
394
395         INIT_LIST_HEAD(&ns->dev_link);
396         init_completion(&ns->disable_done);
397
398         ns->nsid = nsid;
399         ns->subsys = subsys;
400         uuid_gen(&ns->uuid);
401
402         return ns;
403 }
404
405 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
406 {
407         u32 old_sqhd, new_sqhd;
408         u16 sqhd;
409
410         if (status)
411                 nvmet_set_status(req, status);
412
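        /*
         * Advance the shadow SQ head lock-free: concurrent completions on
         * the same SQ race here, so increment modulo the queue size with
         * cmpxchg() rather than taking a lock.
         */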
413         if (req->sq->size) {
414                 do {
415                         old_sqhd = req->sq->sqhd;
416                         new_sqhd = (old_sqhd + 1) % req->sq->size;
417                 } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
418                                         old_sqhd);
419         }
420         sqhd = req->sq->sqhd & 0x0000FFFF;
421         req->rsp->sq_head = cpu_to_le16(sqhd);
422         req->rsp->sq_id = cpu_to_le16(req->sq->qid);
423         req->rsp->command_id = req->cmd->common.command_id;
424
425         if (req->ns)
426                 nvmet_put_namespace(req->ns);
427         req->ops->queue_response(req);
428 }
429
430 void nvmet_req_complete(struct nvmet_req *req, u16 status)
431 {
432         __nvmet_req_complete(req, status);
433         percpu_ref_put(&req->sq->ref);
434 }
435 EXPORT_SYMBOL_GPL(nvmet_req_complete);
436
437 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
438                 u16 qid, u16 size)
439 {
440         cq->qid = qid;
441         cq->size = size;
442
443         ctrl->cqs[qid] = cq;
444 }
445
446 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
447                 u16 qid, u16 size)
448 {
449         sq->sqhd = 0;
450         sq->qid = qid;
451         sq->size = size;
452
453         ctrl->sqs[qid] = sq;
454 }
455
456 static void nvmet_confirm_sq(struct percpu_ref *ref)
457 {
458         struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
459
460         complete(&sq->confirm_done);
461 }
462
463 void nvmet_sq_destroy(struct nvmet_sq *sq)
464 {
465         /*
466          * If this is the admin queue, complete all AERs so that our
467          * queue doesn't have outstanding requests on it.
468          */
469         if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
470                 nvmet_async_events_free(sq->ctrl);
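        /*
         * Tear down in two steps: percpu_ref_kill_and_confirm() switches
         * the ref to atomic mode and calls nvmet_confirm_sq() once that
         * switch is visible (confirm_done); when the last outstanding
         * request drops its reference, nvmet_sq_free() completes
         * free_done.
         */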
471         percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
472         wait_for_completion(&sq->confirm_done);
473         wait_for_completion(&sq->free_done);
474         percpu_ref_exit(&sq->ref);
475
476         if (sq->ctrl) {
477                 nvmet_ctrl_put(sq->ctrl);
478                 sq->ctrl = NULL; /* allows reusing the queue later */
479         }
480 }
481 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
482
483 static void nvmet_sq_free(struct percpu_ref *ref)
484 {
485         struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
486
487         complete(&sq->free_done);
488 }
489
490 int nvmet_sq_init(struct nvmet_sq *sq)
491 {
492         int ret;
493
494         ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
495         if (ret) {
496                 pr_err("percpu_ref init failed!\n");
497                 return ret;
498         }
499         init_completion(&sq->free_done);
500         init_completion(&sq->confirm_done);
501
502         return 0;
503 }
504 EXPORT_SYMBOL_GPL(nvmet_sq_init);
505
506 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
507 {
508         struct nvme_command *cmd = req->cmd;
509         u16 ret;
510
511         ret = nvmet_check_ctrl_status(req, cmd);
512         if (unlikely(ret))
513                 return ret;
514
515         req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
516         if (unlikely(!req->ns))
517                 return NVME_SC_INVALID_NS | NVME_SC_DNR;
518
519         if (req->ns->file)
520                 return nvmet_file_parse_io_cmd(req);
521         else
522                 return nvmet_bdev_parse_io_cmd(req);
523 }
524
525 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
526                 struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
527 {
528         u8 flags = req->cmd->common.flags;
529         u16 status;
530
531         req->cq = cq;
532         req->sq = sq;
533         req->ops = ops;
534         req->sg = NULL;
535         req->sg_cnt = 0;
536         req->transfer_len = 0;
537         req->rsp->status = 0;
538         req->ns = NULL;
539
540         /* no support for fused commands yet */
541         if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
542                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
543                 goto fail;
544         }
545
546         /*
547          * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
548          * contains an address of a single contiguous physical buffer that is
549          * byte aligned.
550          */
551         if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
552                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
553                 goto fail;
554         }
555
556         if (unlikely(!req->sq->ctrl))
557                 /* will return an error for any non-Connect command: */
558                 status = nvmet_parse_connect_cmd(req);
559         else if (likely(req->sq->qid != 0))
560                 status = nvmet_parse_io_cmd(req);
561         else if (req->cmd->common.opcode == nvme_fabrics_command)
562                 status = nvmet_parse_fabrics_cmd(req);
563         else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
564                 status = nvmet_parse_discovery_cmd(req);
565         else
566                 status = nvmet_parse_admin_cmd(req);
567
568         if (status)
569                 goto fail;
570
571         if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
572                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
573                 goto fail;
574         }
575
576         return true;
577
578 fail:
579         __nvmet_req_complete(req, status);
580         return false;
581 }
582 EXPORT_SYMBOL_GPL(nvmet_req_init);
583
584 void nvmet_req_uninit(struct nvmet_req *req)
585 {
586         percpu_ref_put(&req->sq->ref);
587         if (req->ns)
588                 nvmet_put_namespace(req->ns);
589 }
590 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
591
592 void nvmet_req_execute(struct nvmet_req *req)
593 {
594         if (unlikely(req->data_len != req->transfer_len))
595                 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
596         else
597                 req->execute(req);
598 }
599 EXPORT_SYMBOL_GPL(nvmet_req_execute);
600
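/*
 * Example (sketch of a transport receive path; nvmet_foo_ops is a
 * placeholder): a fabrics driver initializes each request, maps the
 * data buffer, sets req->transfer_len, then executes:
 *
 *	if (!nvmet_req_init(req, cq, sq, &nvmet_foo_ops))
 *		return;		(nvmet_req_init() already completed it)
 *	...allocate and map req->sg, set req->sg_cnt and req->transfer_len...
 *	nvmet_req_execute(req);
 */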
601 static inline bool nvmet_cc_en(u32 cc)
602 {
603         return (cc >> NVME_CC_EN_SHIFT) & 0x1;
604 }
605
606 static inline u8 nvmet_cc_css(u32 cc)
607 {
608         return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
609 }
610
611 static inline u8 nvmet_cc_mps(u32 cc)
612 {
613         return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
614 }
615
616 static inline u8 nvmet_cc_ams(u32 cc)
617 {
618         return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
619 }
620
621 static inline u8 nvmet_cc_shn(u32 cc)
622 {
623         return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
624 }
625
626 static inline u8 nvmet_cc_iosqes(u32 cc)
627 {
628         return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
629 }
630
631 static inline u8 nvmet_cc_iocqes(u32 cc)
632 {
633         return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
634 }
635
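/*
 * Example (illustrative only): a host enabling the controller typically
 * writes CC with EN=1, IOSQES=6 (64-byte SQEs) and IOCQES=4 (16-byte
 * CQEs), i.e.:
 *
 *	u32 cc = (1 << NVME_CC_EN_SHIFT) |
 *		 (NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT) |
 *		 (NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT);
 *
 * which nvmet_start_ctrl() below accepts, with MPS, AMS and CSS all
 * left at zero.
 */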
636 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
637 {
638         lockdep_assert_held(&ctrl->lock);
639
640         if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
641             nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
642             nvmet_cc_mps(ctrl->cc) != 0 ||
643             nvmet_cc_ams(ctrl->cc) != 0 ||
644             nvmet_cc_css(ctrl->cc) != 0) {
645                 ctrl->csts = NVME_CSTS_CFS;
646                 return;
647         }
648
649         ctrl->csts = NVME_CSTS_RDY;
650 }
651
652 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
653 {
654         lockdep_assert_held(&ctrl->lock);
655
656         /* XXX: tear down queues? */
657         ctrl->csts &= ~NVME_CSTS_RDY;
658         ctrl->cc = 0;
659 }
660
661 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
662 {
663         u32 old;
664
665         mutex_lock(&ctrl->lock);
666         old = ctrl->cc;
667         ctrl->cc = new;
668
669         if (nvmet_cc_en(new) && !nvmet_cc_en(old))
670                 nvmet_start_ctrl(ctrl);
671         if (!nvmet_cc_en(new) && nvmet_cc_en(old))
672                 nvmet_clear_ctrl(ctrl);
673         if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
674                 nvmet_clear_ctrl(ctrl);
675                 ctrl->csts |= NVME_CSTS_SHST_CMPLT;
676         }
677         if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
678                 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
679         mutex_unlock(&ctrl->lock);
680 }
681
682 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
683 {
684         /* command sets supported: NVMe command set: */
685         ctrl->cap = (1ULL << 37);
686         /* CC.EN timeout in 500msec units: */
687         ctrl->cap |= (15ULL << 24);
688         /* maximum queue entries supported: */
689         ctrl->cap |= NVMET_QUEUE_SIZE - 1;
690 }
691
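/*
 * Read back by the host, this yields CAP.CSS = NVM command set
 * (bit 37), CAP.TO = 15 (a 7.5 second CC.EN timeout, in 500 msec
 * units) and CAP.MQES = NVMET_QUEUE_SIZE - 1 (MQES is zero-based).
 */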
692 u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
693                 struct nvmet_req *req, struct nvmet_ctrl **ret)
694 {
695         struct nvmet_subsys *subsys;
696         struct nvmet_ctrl *ctrl;
697         u16 status = 0;
698
699         subsys = nvmet_find_get_subsys(req->port, subsysnqn);
700         if (!subsys) {
701                 pr_warn("connect request for invalid subsystem %s!\n",
702                         subsysnqn);
703                 req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
704                 return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
705         }
706
707         mutex_lock(&subsys->lock);
708         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
709                 if (ctrl->cntlid == cntlid) {
710                         if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
711                                 pr_warn("hostnqn mismatch.\n");
712                                 continue;
713                         }
714                         if (!kref_get_unless_zero(&ctrl->ref))
715                                 continue;
716
717                         *ret = ctrl;
718                         goto out;
719                 }
720         }
721
722         pr_warn("could not find controller %d for subsys %s / host %s\n",
723                 cntlid, subsysnqn, hostnqn);
724         req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
725         status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
726
727 out:
728         mutex_unlock(&subsys->lock);
729         nvmet_subsys_put(subsys);
730         return status;
731 }
732
733 u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
734 {
735         if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
736                 pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
737                        cmd->common.opcode, req->sq->qid);
738                 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
739         }
740
741         if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
742                 pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
743                        cmd->common.opcode, req->sq->qid);
744                 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
745         }
746         return 0;
747 }
748
749 static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
750                 const char *hostnqn)
751 {
752         struct nvmet_host_link *p;
753
754         if (subsys->allow_any_host)
755                 return true;
756
757         list_for_each_entry(p, &subsys->hosts, entry) {
758                 if (!strcmp(nvmet_host_name(p->host), hostnqn))
759                         return true;
760         }
761
762         return false;
763 }
764
765 static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
766                 const char *hostnqn)
767 {
768         struct nvmet_subsys_link *s;
769
770         list_for_each_entry(s, &req->port->subsystems, entry) {
771                 if (__nvmet_host_allowed(s->subsys, hostnqn))
772                         return true;
773         }
774
775         return false;
776 }
777
778 bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
779                 const char *hostnqn)
780 {
781         lockdep_assert_held(&nvmet_config_sem);
782
783         if (subsys->type == NVME_NQN_DISC)
784                 return nvmet_host_discovery_allowed(req, hostnqn);
785         else
786                 return __nvmet_host_allowed(subsys, hostnqn);
787 }
788
789 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
790                 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
791 {
792         struct nvmet_subsys *subsys;
793         struct nvmet_ctrl *ctrl;
794         int ret;
795         u16 status;
796
797         status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
798         subsys = nvmet_find_get_subsys(req->port, subsysnqn);
799         if (!subsys) {
800                 pr_warn("connect request for invalid subsystem %s!\n",
801                         subsysnqn);
802                 req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
803                 goto out;
804         }
805
806         status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
807         down_read(&nvmet_config_sem);
808         if (!nvmet_host_allowed(req, subsys, hostnqn)) {
809                 pr_info("connect by host %s for subsystem %s not allowed\n",
810                         hostnqn, subsysnqn);
811                 req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
812                 up_read(&nvmet_config_sem);
813                 status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
814                 goto out_put_subsystem;
815         }
816         up_read(&nvmet_config_sem);
817
818         status = NVME_SC_INTERNAL;
819         ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
820         if (!ctrl)
821                 goto out_put_subsystem;
822         mutex_init(&ctrl->lock);
823
824         nvmet_init_cap(ctrl);
825
826         INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
827         INIT_LIST_HEAD(&ctrl->async_events);
828
829         memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
830         memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
831
832         kref_init(&ctrl->ref);
833         ctrl->subsys = subsys;
834
835         ctrl->cqs = kcalloc(subsys->max_qid + 1,
836                         sizeof(struct nvmet_cq *),
837                         GFP_KERNEL);
838         if (!ctrl->cqs)
839                 goto out_free_ctrl;
840
841         ctrl->sqs = kcalloc(subsys->max_qid + 1,
842                         sizeof(struct nvmet_sq *),
843                         GFP_KERNEL);
844         if (!ctrl->sqs)
845                 goto out_free_cqs;
846
847         ret = ida_simple_get(&cntlid_ida,
848                              NVME_CNTLID_MIN, NVME_CNTLID_MAX,
849                              GFP_KERNEL);
850         if (ret < 0) {
851                 status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
852                 goto out_free_sqs;
853         }
854         ctrl->cntlid = ret;
855
856         ctrl->ops = req->ops;
857         if (ctrl->subsys->type == NVME_NQN_DISC) {
858                 /* Don't accept keep-alive timeout for discovery controllers */
859                 if (kato) {
860                         status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
861                         goto out_remove_ida;
862                 }
863
864                 /*
865                  * Discovery controllers use an arbitrary high value in order
866                  * to clean up stale discovery sessions.
867                  *
868                  * From the latest base diff RC:
869                  * "The Keep Alive command is not supported by
870                  * Discovery controllers. A transport may specify a
871                  * fixed Discovery controller activity timeout value
872                  * (e.g., 2 minutes).  If no commands are received
873                  * by a Discovery controller within that time
874                  * period, the controller may perform the
875                  * actions for Keep Alive Timer expiration".
876                  */
877                 ctrl->kato = NVMET_DISC_KATO;
878         } else {
879                 /* keep-alive timeout in seconds */
880                 ctrl->kato = DIV_ROUND_UP(kato, 1000);
881         }
882         nvmet_start_keep_alive_timer(ctrl);
883
884         mutex_lock(&subsys->lock);
885         list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
886         mutex_unlock(&subsys->lock);
887
888         *ctrlp = ctrl;
889         return 0;
890
891 out_remove_ida:
892         ida_simple_remove(&cntlid_ida, ctrl->cntlid);
893 out_free_sqs:
894         kfree(ctrl->sqs);
895 out_free_cqs:
896         kfree(ctrl->cqs);
897 out_free_ctrl:
898         kfree(ctrl);
899 out_put_subsystem:
900         nvmet_subsys_put(subsys);
901 out:
902         return status;
903 }
904
905 static void nvmet_ctrl_free(struct kref *ref)
906 {
907         struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
908         struct nvmet_subsys *subsys = ctrl->subsys;
909
910         mutex_lock(&subsys->lock);
911         list_del(&ctrl->subsys_entry);
912         mutex_unlock(&subsys->lock);
913
914         nvmet_stop_keep_alive_timer(ctrl);
915
916         flush_work(&ctrl->async_event_work);
917         cancel_work_sync(&ctrl->fatal_err_work);
918
919         ida_simple_remove(&cntlid_ida, ctrl->cntlid);
920
921         kfree(ctrl->sqs);
922         kfree(ctrl->cqs);
923         kfree(ctrl);
924
925         nvmet_subsys_put(subsys);
926 }
927
928 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
929 {
930         kref_put(&ctrl->ref, nvmet_ctrl_free);
931 }
932
933 static void nvmet_fatal_error_handler(struct work_struct *work)
934 {
935         struct nvmet_ctrl *ctrl =
936                         container_of(work, struct nvmet_ctrl, fatal_err_work);
937
938         pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
939         ctrl->ops->delete_ctrl(ctrl);
940 }
941
942 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
943 {
944         mutex_lock(&ctrl->lock);
945         if (!(ctrl->csts & NVME_CSTS_CFS)) {
946                 ctrl->csts |= NVME_CSTS_CFS;
947                 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
948                 schedule_work(&ctrl->fatal_err_work);
949         }
950         mutex_unlock(&ctrl->lock);
951 }
952 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
953
954 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
955                 const char *subsysnqn)
956 {
957         struct nvmet_subsys_link *p;
958
959         if (!port)
960                 return NULL;
961
962         if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
963                         NVMF_NQN_SIZE)) {
964                 if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
965                         return NULL;
966                 return nvmet_disc_subsys;
967         }
968
969         down_read(&nvmet_config_sem);
970         list_for_each_entry(p, &port->subsystems, entry) {
971                 if (!strncmp(p->subsys->subsysnqn, subsysnqn,
972                                 NVMF_NQN_SIZE)) {
973                         if (!kref_get_unless_zero(&p->subsys->ref))
974                                 break;
975                         up_read(&nvmet_config_sem);
976                         return p->subsys;
977                 }
978         }
979         up_read(&nvmet_config_sem);
980         return NULL;
981 }
982
983 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
984                 enum nvme_subsys_type type)
985 {
986         struct nvmet_subsys *subsys;
987
988         subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
989         if (!subsys)
990                 return NULL;
991
992         subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
993         /* generate a random serial number as our controllers are ephemeral: */
994         get_random_bytes(&subsys->serial, sizeof(subsys->serial));
995
996         switch (type) {
997         case NVME_NQN_NVME:
998                 subsys->max_qid = NVMET_NR_QUEUES;
999                 break;
1000         case NVME_NQN_DISC:
1001                 subsys->max_qid = 0;
1002                 break;
1003         default:
1004                 pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1005                 kfree(subsys);
1006                 return NULL;
1007         }
1008         subsys->type = type;
1009         subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1010                         GFP_KERNEL);
1011         if (!subsys->subsysnqn) {
1012                 kfree(subsys);
1013                 return NULL;
1014         }
1015
1016         kref_init(&subsys->ref);
1017
1018         mutex_init(&subsys->lock);
1019         INIT_LIST_HEAD(&subsys->namespaces);
1020         INIT_LIST_HEAD(&subsys->ctrls);
1021         INIT_LIST_HEAD(&subsys->hosts);
1022
1023         return subsys;
1024 }
1025
1026 static void nvmet_subsys_free(struct kref *ref)
1027 {
1028         struct nvmet_subsys *subsys =
1029                 container_of(ref, struct nvmet_subsys, ref);
1030
1031         WARN_ON_ONCE(!list_empty(&subsys->namespaces));
1032
1033         kfree(subsys->subsysnqn);
1034         kfree(subsys);
1035 }
1036
1037 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1038 {
1039         struct nvmet_ctrl *ctrl;
1040
1041         mutex_lock(&subsys->lock);
1042         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1043                 ctrl->ops->delete_ctrl(ctrl);
1044         mutex_unlock(&subsys->lock);
1045 }
1046
1047 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1048 {
1049         kref_put(&subsys->ref, nvmet_subsys_free);
1050 }
1051
1052 static int __init nvmet_init(void)
1053 {
1054         int error;
1055
1056         error = nvmet_init_discovery();
1057         if (error)
1058                 goto out;
1059
1060         error = nvmet_init_configfs();
1061         if (error)
1062                 goto out_exit_discovery;
1063         return 0;
1064
1065 out_exit_discovery:
1066         nvmet_exit_discovery();
1067 out:
1068         return error;
1069 }
1070
1071 static void __exit nvmet_exit(void)
1072 {
1073         nvmet_exit_configfs();
1074         nvmet_exit_discovery();
1075         ida_destroy(&cntlid_ida);
1076
1077         BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1078         BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1079 }
1080
1081 module_init(nvmet_init);
1082 module_exit(nvmet_exit);
1083
1084 MODULE_LICENSE("GPL v2");