/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-gc.c - pblk's garbage collector
 */

#include "pblk.h"
#include <linux/delay.h>

static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
        if (gc_rq->data)
                vfree(gc_rq->data);
        kfree(gc_rq);
}
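
/* Drain the GC write list: re-inject the valid sectors read from a victim
 * line into the write buffer through the cache path, then drop the line
 * reference held by each GC request.
 */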
static int pblk_gc_write(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_gc_rq *gc_rq, *tgc_rq;
        LIST_HEAD(w_list);

        spin_lock(&gc->w_lock);
        if (list_empty(&gc->w_list)) {
                spin_unlock(&gc->w_lock);
                return 1;
        }

        list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
        gc->w_entries = 0;
        spin_unlock(&gc->w_lock);

        list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
                pblk_write_gc_to_cache(pblk, gc_rq);
                list_del(&gc_rq->list);
                kref_put(&gc_rq->line->ref, pblk_line_put);
                pblk_gc_free_gc_rq(gc_rq);
        }

        return 0;
}

static void pblk_gc_writer_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_writer_ts);
}
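
/* GC on this line failed; return it to its matching GC group list so that
 * it can be selected as a victim again later.
 */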
static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_CLOSED;
        move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                list_add_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}
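
/* Per-request GC read work: read the valid sectors of a victim line into a
 * buffer and queue them on the GC write list, bounded by PBLK_GC_RQ_QD
 * outstanding entries.
 */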
static void pblk_gc_line_ws(struct work_struct *work)
{
        struct pblk_line_ws *gc_rq_ws = container_of(work,
                                                struct pblk_line_ws, ws);
        struct pblk *pblk = gc_rq_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line = gc_rq_ws->line;
        struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
        int ret;

        up(&gc->gc_sem);

        gc_rq->data = vmalloc(gc_rq->nr_secs * geo->csecs);
        if (!gc_rq->data) {
                pr_err("pblk: could not GC line:%d (%d/%d)\n",
                                line->id, *line->vsc, gc_rq->nr_secs);
                goto out;
        }

        /* Read from GC victim block */
        ret = pblk_submit_read_gc(pblk, gc_rq);
        if (ret) {
                pr_err("pblk: failed GC read in line:%d (err:%d)\n",
                                line->id, ret);
                goto out;
        }

        if (!gc_rq->secs_to_gc)
                goto out;

retry:
        spin_lock(&gc->w_lock);
        if (gc->w_entries >= PBLK_GC_RQ_QD) {
                spin_unlock(&gc->w_lock);
                pblk_gc_writer_kick(&pblk->gc);
                usleep_range(128, 256);
                goto retry;
        }
        gc->w_entries++;
        list_add_tail(&gc_rq->list, &gc->w_list);
        spin_unlock(&gc->w_lock);

        pblk_gc_writer_kick(&pblk->gc);

        kfree(gc_rq_ws);
        return;

out:
        pblk_gc_free_gc_rq(gc_rq);
        kref_put(&line->ref, pblk_line_put);
        kfree(gc_rq_ws);
}
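
/* Recover the line's L2P mapping from its end metadata (emeta), as stored
 * on the line itself. Returns NULL if the emeta cannot be read or fails
 * the consistency check.
 */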
static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
                                       struct pblk_line *line)
{
        struct line_emeta *emeta_buf;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int lba_list_size = lm->emeta_len[2];
        __le64 *lba_list;
        int ret;

        emeta_buf = pblk_malloc(lm->emeta_len[0],
                                l_mg->emeta_alloc_type, GFP_KERNEL);
        if (!emeta_buf)
                return NULL;

        ret = pblk_line_read_emeta(pblk, line, emeta_buf);
        if (ret) {
                pr_err("pblk: line %d read emeta failed (%d)\n",
                                line->id, ret);
                pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
                return NULL;
        }

        /* If this check fails, it means that emeta is corrupted.
         * For now, leave the line untouched.
         * TODO: Implement a recovery routine that scans and moves
         * all sectors on the line.
         */
        ret = pblk_recov_check_emeta(pblk, emeta_buf);
        if (ret) {
                pr_err("pblk: inconsistent emeta (line %d)\n",
                                line->id);
                pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
                return NULL;
        }

        lba_list = pblk_malloc(lba_list_size,
                               l_mg->emeta_alloc_type, GFP_KERNEL);
        if (lba_list)
                memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);

        pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
        return lba_list;
}
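
/* Split a victim line into GC requests of at most max_write_pgs sectors,
 * skipping invalidated sectors, and queue them on the GC line reader
 * workqueue. For lines with write errors, the lba list comes from the
 * write error recovery context rather than from emeta.
 */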
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line_ws *gc_rq_ws;
        struct pblk_gc_rq *gc_rq;
        __le64 *lba_list;
        unsigned long *invalid_bitmap;
        int sec_left, nr_secs, bit;

        invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!invalid_bitmap)
                goto fail_free_ws;

        if (line->w_err_gc->has_write_err) {
                lba_list = line->w_err_gc->lba_list;
                line->w_err_gc->lba_list = NULL;
        } else {
                lba_list = get_lba_list_from_emeta(pblk, line);
                if (!lba_list) {
                        pr_err("pblk: could not interpret emeta (line %d)\n",
                                        line->id);
                        goto fail_free_invalid_bitmap;
                }
        }

        spin_lock(&line->lock);
        bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
        sec_left = pblk_line_vsc(line);
        spin_unlock(&line->lock);

        if (sec_left < 0) {
                pr_err("pblk: corrupted GC line (%d)\n", line->id);
                goto fail_free_lba_list;
        }

        bit = -1;
next_rq:
        gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
        if (!gc_rq)
                goto fail_free_lba_list;

        nr_secs = 0;
        do {
                bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
                                                                bit + 1);
                if (bit > line->emeta_ssec)
                        break;

                gc_rq->paddr_list[nr_secs] = bit;
                gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
        } while (nr_secs < pblk->max_write_pgs);

        if (unlikely(!nr_secs)) {
                kfree(gc_rq);
                goto out;
        }

        gc_rq->nr_secs = nr_secs;
        gc_rq->line = line;

        gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!gc_rq_ws)
                goto fail_free_gc_rq;

        gc_rq_ws->pblk = pblk;
        gc_rq_ws->line = line;
        gc_rq_ws->priv = gc_rq;

        /* The write GC path can be much slower than the read GC one due to
         * the budget imposed by the rate-limiter. Balance in case that we get
         * back pressure from the write GC path.
         */
        while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
                pblk_gc_writer_kick(gc);

        kref_get(&line->ref);

        INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
        queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);

        sec_left -= nr_secs;
        if (sec_left > 0)
                goto next_rq;

out:
        pblk_mfree(lba_list, l_mg->emeta_alloc_type);
        kfree(line_ws);
        kfree(invalid_bitmap);

        kref_put(&line->ref, pblk_line_put);
        atomic_dec(&gc->read_inflight_gc);
        return;

fail_free_gc_rq:
        kfree(gc_rq);
fail_free_lba_list:
        pblk_mfree(lba_list, l_mg->emeta_alloc_type);
fail_free_invalid_bitmap:
        kfree(invalid_bitmap);
fail_free_ws:
        kfree(line_ws);

        pblk_put_line_back(pblk, line);
        kref_put(&line->ref, pblk_line_put);
        atomic_dec(&gc->read_inflight_gc);

        pr_err("pblk: Failed to GC line %d\n", line->id);
}
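
/* Hand a victim line over to the GC reader workqueue; the actual work is
 * done in pblk_gc_line_prepare_ws().
 */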
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line_ws *line_ws;

        pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);

        line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!line_ws)
                return -ENOMEM;

        line_ws->pblk = pblk;
        line_ws->line = line;

        atomic_inc(&gc->pipeline_gc);
        INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
        queue_work(gc->gc_reader_wq, &line_ws->ws);

        return 0;
}

static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_reader_ts);
}

static void pblk_gc_kick(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        pblk_gc_writer_kick(gc);
        pblk_gc_reader_kick(gc);

        /* If we're shutting down GC, let's not start it up again */
        if (gc->gc_enabled) {
                wake_up_process(gc->gc_ts);
                mod_timer(&gc->gc_timer,
                          jiffies + msecs_to_jiffies(GC_TIME_MSECS));
        }
}
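
/* Take one victim line off the GC read list and start reclaiming it.
 * Returns 1 when the list is empty so the reader kthread can sleep.
 */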
static int pblk_gc_read(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;

        spin_lock(&gc->r_lock);
        if (list_empty(&gc->r_list)) {
                spin_unlock(&gc->r_lock);
                return 1;
        }

        line = list_first_entry(&gc->r_list, struct pblk_line, list);
        list_del(&line->list);
        spin_unlock(&gc->r_lock);

        pblk_gc_kick(pblk);

        if (pblk_gc_line(pblk, line))
                pr_err("pblk: failed to GC line %d\n", line->id);

        return 0;
}
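
/* Greedy victim selection: pick the line in the group with the fewest
 * valid sectors, i.e. the least data to move.
 */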
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
                                                 struct list_head *group_list)
{
        struct pblk_line *line, *victim;
        int line_vsc, victim_vsc;

        victim = list_first_entry(group_list, struct pblk_line, list);
        list_for_each_entry(line, group_list, list) {
                line_vsc = le32_to_cpu(*line->vsc);
                victim_vsc = le32_to_cpu(*victim->vsc);
                if (line_vsc < victim_vsc)
                        victim = line;
        }

        return victim;
}

static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
{
        unsigned int nr_blocks_free, nr_blocks_need;
        unsigned int werr_lines = atomic_read(&rl->werr_lines);

        nr_blocks_need = pblk_rl_high_thrs(rl);
        nr_blocks_free = pblk_rl_nr_free_blks(rl);

        /* This is not critical, no need to take lock here */
        return ((werr_lines > 0) ||
                ((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
}
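
/* Lines on gc_full_list hold no valid sectors; dropping the last reference
 * returns them to the free list without moving any data.
 */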
void pblk_gc_free_full_lines(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;

        do {
                spin_lock(&l_mg->gc_lock);
                if (list_empty(&l_mg->gc_full_list)) {
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }

                line = list_first_entry(&l_mg->gc_full_list,
                                                struct pblk_line, list);

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                atomic_inc(&gc->pipeline_gc);
                kref_put(&line->ref, pblk_line_put);
        } while (1);
}

/*
 * Lines with no valid sectors will be returned to the free list immediately. If
 * GC is activated - either because the free block count is under the determined
 * threshold, or because it is being forced from user space - only lines with a
 * high count of invalid sectors will be recycled.
 */
static void pblk_gc_run(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;
        struct list_head *group_list;
        bool run_gc;
        int read_inflight_gc, gc_group = 0, prev_group = 0;

        pblk_gc_free_full_lines(pblk);

        run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
        if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
                return;

next_gc_group:
        group_list = l_mg->gc_lists[gc_group++];

        do {
                spin_lock(&l_mg->gc_lock);
                if (list_empty(group_list)) {
                        spin_unlock(&l_mg->gc_lock);
                        break;
                }

                line = pblk_gc_get_victim_line(pblk, group_list);

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                spin_lock(&gc->r_lock);
                list_add_tail(&line->list, &gc->r_list);
                spin_unlock(&gc->r_lock);

                read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
                pblk_gc_reader_kick(gc);

                prev_group = 1;

                /* No need to queue up more GC lines than we can handle */
                run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
                if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
                        break;
        } while (1);

        if (!prev_group && pblk->rl.rb_state > gc_group &&
                                        gc_group < PBLK_GC_NR_LISTS)
                goto next_gc_group;
}

static void pblk_gc_timer(struct timer_list *t)
{
        struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);

        pblk_gc_kick(pblk);
}
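
/* Main GC kthread: woken by the GC timer or an explicit kick, it selects
 * victim lines. Reading and rewriting the valid data is delegated to the
 * reader and writer kthreads below.
 */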
static int pblk_gc_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                pblk_gc_run(pblk);
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

static int pblk_gc_writer_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_gc_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

static int pblk_gc_reader_ts(void *data)
{
        struct pblk *pblk = data;
        struct pblk_gc *gc = &pblk->gc;

        while (!kthread_should_stop()) {
                if (!pblk_gc_read(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

#ifdef CONFIG_NVM_DEBUG
        pr_info("pblk: flushing gc pipeline, %d lines left\n",
                        atomic_read(&gc->pipeline_gc));
#endif

        do {
                if (!atomic_read(&gc->pipeline_gc))
                        break;

                schedule();
        } while (1);

        return 0;
}

static void pblk_gc_start(struct pblk *pblk)
{
        pblk->gc.gc_active = 1;
        pr_debug("pblk: gc start\n");
}

void pblk_gc_should_start(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_enabled && !gc->gc_active) {
                pblk_gc_start(pblk);
                pblk_gc_kick(pblk);
        }
}

void pblk_gc_should_stop(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_active && !gc->gc_forced)
                gc->gc_active = 0;
}

void pblk_gc_should_kick(struct pblk *pblk)
{
        pblk_rl_update_rates(&pblk->rl);
}

void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
                              int *gc_active)
{
        struct pblk_gc *gc = &pblk->gc;

        spin_lock(&gc->lock);
        *gc_enabled = gc->gc_enabled;
        *gc_active = gc->gc_active;
        spin_unlock(&gc->lock);
}
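
/* Back-end for the user-space GC force control exposed through sysfs (the
 * attribute itself is defined in pblk-sysfs.c). Forcing GC also enables it
 * and starts it immediately via pblk_gc_should_start().
 */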
int pblk_gc_sysfs_force(struct pblk *pblk, int force)
{
        struct pblk_gc *gc = &pblk->gc;

        if (force < 0 || force > 1)
                return -EINVAL;

        spin_lock(&gc->lock);
        gc->gc_forced = force;

        if (force)
                gc->gc_enabled = 1;
        else
                gc->gc_enabled = 0;
        spin_unlock(&gc->lock);

        pblk_gc_should_start(pblk);

        return 0;
}
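
/* Set up the GC machinery: three kthreads (main, writer, reader), the
 * periodic GC timer and two workqueues (line preparation and line reading).
 * The kthreads are created without being started and are only woken through
 * the kick helpers above.
 */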
int pblk_gc_init(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        int ret;

        gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
        if (IS_ERR(gc->gc_ts)) {
                pr_err("pblk: could not allocate GC main kthread\n");
                return PTR_ERR(gc->gc_ts);
        }

        gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
                                                        "pblk-gc-writer-ts");
        if (IS_ERR(gc->gc_writer_ts)) {
                pr_err("pblk: could not allocate GC writer kthread\n");
                ret = PTR_ERR(gc->gc_writer_ts);
                goto fail_free_main_kthread;
        }

        gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
                                                        "pblk-gc-reader-ts");
        if (IS_ERR(gc->gc_reader_ts)) {
                pr_err("pblk: could not allocate GC reader kthread\n");
                ret = PTR_ERR(gc->gc_reader_ts);
                goto fail_free_writer_kthread;
        }

        timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
        mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));

        gc->gc_active = 0;
        gc->gc_forced = 0;
        gc->gc_enabled = 1;
        gc->w_entries = 0;
        atomic_set(&gc->read_inflight_gc, 0);
        atomic_set(&gc->pipeline_gc, 0);

        /* Workqueue that reads valid sectors from a line and submits them to
         * the GC writer to be recycled.
         */
        gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
        if (!gc->gc_line_reader_wq) {
                pr_err("pblk: could not allocate GC line reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_kthread;
        }

        /* Workqueue that prepares lines for GC */
        gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
                                        WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
        if (!gc->gc_reader_wq) {
                pr_err("pblk: could not allocate GC reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_line_wq;
        }

        spin_lock_init(&gc->lock);
        spin_lock_init(&gc->w_lock);
        spin_lock_init(&gc->r_lock);

        sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);

        INIT_LIST_HEAD(&gc->w_list);
        INIT_LIST_HEAD(&gc->r_list);

        return 0;

fail_free_reader_line_wq:
        destroy_workqueue(gc->gc_line_reader_wq);
fail_free_reader_kthread:
        kthread_stop(gc->gc_reader_ts);
fail_free_writer_kthread:
        kthread_stop(gc->gc_writer_ts);
fail_free_main_kthread:
        kthread_stop(gc->gc_ts);

        return ret;
}

void pblk_gc_exit(struct pblk *pblk, bool graceful)
{
        struct pblk_gc *gc = &pblk->gc;

        gc->gc_enabled = 0;
        del_timer_sync(&gc->gc_timer);
        gc->gc_active = 0;

        if (gc->gc_ts)
                kthread_stop(gc->gc_ts);

        if (gc->gc_reader_ts)
                kthread_stop(gc->gc_reader_ts);

        if (graceful) {
                flush_workqueue(gc->gc_reader_wq);
                flush_workqueue(gc->gc_line_reader_wq);
        }

        destroy_workqueue(gc->gc_reader_wq);
        destroy_workqueue(gc->gc_line_reader_wq);

        if (gc->gc_writer_ts)
                kthread_stop(gc->gc_writer_ts);
}