/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

	  completely rewritten, based on the MD driver code from Marc Zyngier

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/config.h>
#include <linux/raid/md.h>
#include <linux/sysctl.h>
#include <linux/raid/xor.h>
#include <linux/devfs_fs_kernel.h>

#include <linux/init.h>

#include <linux/kmod.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>

#include <asm/unaligned.h>

#define MAJOR_NR MD_MAJOR

#include <linux/blk.h>

#define DEBUG 0
#if DEBUG
# define dprintk(x...) printk(x)
#else
# define dprintk(x...) do { } while(0)
#endif
static void autostart_arrays (void);

static mdk_personality_t *pers[MAX_PERSONALITY];

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 100 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * irq load.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 */

static int sysctl_speed_limit_min = 100;
static int sysctl_speed_limit_max = 100000;
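/*
 * Illustrative usage (not part of the driver): with the defaults above,
 * resync proceeds at no less than 100 KB/sec per disk even under load,
 * and is throttled once it exceeds 100000 KB/sec while other IO is
 * pending. An administrator could raise the floor to 1 MB/sec at
 * runtime via the sysctl files registered below (values in KB/sec):
 *
 *	echo 1000   > /proc/sys/dev/raid/speed_limit_min
 *	echo 200000 > /proc/sys/dev/raid/speed_limit_max
 */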
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min",
	 &sysctl_speed_limit_min, sizeof(int), 0644, NULL, &proc_dointvec},
	{DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max",
	 &sysctl_speed_limit_max, sizeof(int), 0644, NULL, &proc_dointvec},

static ctl_table raid_dir_table[] = {
	{DEV_RAID, "raid", NULL, 0, 0555, raid_table},

static ctl_table raid_root_table[] = {
	{CTL_DEV, "dev", NULL, 0, 0555, raid_dir_table},

/*
 * these have to be allocated separately because external
 * subsystems want to have a pre-defined structure
 */
struct hd_struct md_hd_struct[MAX_MD_DEVS];
static int md_blocksizes[MAX_MD_DEVS];
static int md_hardsect_sizes[MAX_MD_DEVS];
static mdk_thread_t *md_recovery_thread;

int md_size[MAX_MD_DEVS];

static struct block_device_operations md_fops;
static devfs_handle_t devfs_handle;

static struct gendisk md_gendisk =
{
	nr_real: MAX_MD_DEVS,

/*
 * Enables iteration over all existing md arrays
 */
static MD_LIST_HEAD(all_mddevs);

static mddev_t *mddev_map[MAX_MD_DEVS];

static inline mddev_t * kdev_to_mddev (kdev_t dev)
{
	if (MAJOR(dev) != MD_MAJOR)
		BUG();
	return mddev_map[MINOR(dev)];
}
static int md_fail_request (request_queue_t *q, struct bio *bio)

static mddev_t * alloc_mddev(kdev_t dev)
{
	if (MAJOR(dev) != MD_MAJOR) {

	mddev = (mddev_t *) kmalloc(sizeof(*mddev), GFP_KERNEL);

	memset(mddev, 0, sizeof(*mddev));

	mddev->__minor = MINOR(dev);
	init_MUTEX(&mddev->reconfig_sem);
	init_MUTEX(&mddev->recovery_sem);
	init_MUTEX(&mddev->resync_sem);
	MD_INIT_LIST_HEAD(&mddev->disks);
	MD_INIT_LIST_HEAD(&mddev->all_mddevs);
	atomic_set(&mddev->active, 0);

	mddev_map[mdidx(mddev)] = mddev;
	md_list_add(&mddev->all_mddevs, &all_mddevs);

mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	struct md_list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->desc_nr == nr)

mdk_rdev_t * find_rdev(mddev_t * mddev, kdev_t dev)
{
	struct md_list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->dev == dev)

static MD_LIST_HEAD(device_names);

char * partition_name(kdev_t dev)
{
	static char nomem [] = "<nomem>";

	struct md_list_head *tmp;

	list_for_each(tmp, &device_names) {
		dname = md_list_entry(tmp, dev_name_t, list);
		if (dname->dev == dev)

	dname = (dev_name_t *) kmalloc(sizeof(*dname), GFP_KERNEL);

	/*
	 * ok, add this new device name to the list
	 */
	hd = get_gendisk (dev);

	dname->name = disk_name (hd, MINOR(dev), dname->namebuf);

		sprintf (dname->namebuf, "[dev %s]", kdevname(dev));
		dname->name = dname->namebuf;

	md_list_add(&dname->list, &device_names);

static unsigned int calc_dev_sboffset(kdev_t dev, mddev_t *mddev,
				      int persistent)
{
	unsigned int size = 0;

	if (blk_size[MAJOR(dev)])
		size = blk_size[MAJOR(dev)][MINOR(dev)];

		size = MD_NEW_SIZE_BLOCKS(size);
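	/*
	 * Worked example (illustrative): MD_NEW_SIZE_BLOCKS() places the
	 * superblock in the last 64 KB-aligned 64 KB slot of the device.
	 * Assuming MD_RESERVED_BLOCKS is 64 (in 1 KB blocks), a 10000 KB
	 * partition gives (10000 & ~63) - 64 = 9984 - 64 = 9920, i.e. the
	 * superblock is written at offset 9920 KB and the last 80 KB of
	 * the partition are never used for data.
	 */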
static unsigned int calc_dev_size(kdev_t dev, mddev_t *mddev, int persistent)
{

	size = calc_dev_sboffset(dev, mddev, persistent);

	if (mddev->sb->chunk_size)
		size &= ~(mddev->sb->chunk_size/1024 - 1);

static unsigned int zoned_raid_size(mddev_t *mddev)
{

	struct md_list_head *tmp;

	/*
	 * do size and offset calculations.
	 */
	mask = ~(mddev->sb->chunk_size/1024 - 1);

	ITERATE_RDEV(mddev,rdev,tmp) {

		md_size[mdidx(mddev)] += rdev->size;

static void remove_descriptor(mdp_disk_t *disk, mdp_super_t *sb)
{
	if (disk_active(disk)) {

	if (disk_spare(disk)) {

	mark_disk_removed(disk);

#define BAD_MAGIC KERN_ERR \
"md: invalid raid superblock magic on %s\n"

#define BAD_MINOR KERN_ERR \
"md: %s: invalid raid minor (%x)\n"

#define OUT_OF_MEM KERN_ALERT \
"md: out of memory.\n"

#define NO_SB KERN_ERR \
"md: disabled device %s, could not read superblock.\n"

#define BAD_CSUM KERN_WARNING \
"md: invalid superblock checksum on %s\n"

static int alloc_array_sb(mddev_t * mddev)
{

	mddev->sb = (mdp_super_t *) __get_free_page (GFP_KERNEL);

	md_clear_page(mddev->sb);

static int alloc_disk_sb(mdk_rdev_t * rdev)
{

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {

	rdev->sb = (mdp_super_t *) page_address(rdev->sb_page);

static void free_disk_sb(mdk_rdev_t * rdev)
{
		page_cache_release(rdev->sb_page);

		rdev->sb_page = NULL;
static void bh_complete(struct buffer_head *bh, int uptodate)
{

		set_bit(BH_Uptodate, &bh->b_state);

	complete((struct completion*)bh->b_private);

static int sync_page_io(kdev_t dev, unsigned long sector, int size,
			struct page *page, int rw)
{
	struct buffer_head bh;
	struct completion event;

	init_completion(&event);
	init_buffer(&bh, bh_complete, &event);

	bh.b_rsector = sector;
	bh.b_state = (1 << BH_Req) | (1 << BH_Mapped) | (1 << BH_Lock);

	bh.b_data = page_address(page);
	generic_make_request(rw, &bh);

	run_task_queue(&tq_disk);
	wait_for_completion(&event);

	return test_bit(BH_Uptodate, &bh.b_state);
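/*
 * Note on units: sync_page_io() takes its start position in 512-byte
 * sectors, while superblock offsets in this file are kept in 1 KB
 * blocks - hence the "sb_offset<<1" at the call sites in read_disk_sb()
 * and write_disk_sb(). The IO is fully synchronous: the caller sleeps
 * until bh_complete() fires, and the return value reflects BH_Uptodate.
 */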
static int read_disk_sb(mdk_rdev_t * rdev)
{

	kdev_t dev = rdev->dev;
	unsigned long sb_offset;

	/*
	 * Calculate the position of the superblock,
	 * it's at the end of the disk
	 */
	sb_offset = calc_dev_sboffset(rdev->dev, rdev->mddev, 1);
	rdev->sb_offset = sb_offset;

	if (!sync_page_io(dev, sb_offset<<1, MD_SB_BYTES, rdev->sb_page, READ)) {
		printk(NO_SB,partition_name(dev));

	printk(KERN_INFO " [events: %08lx]\n", (unsigned long)rdev->sb->events_lo);

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;

	csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
	sb->sb_csum = disk_csum;
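	/*
	 * Note: the checksum covers all MD_SB_BYTES of the superblock
	 * with the sb_csum field itself counted as zero; the on-disk
	 * value is saved before and restored after the computation, so
	 * check_disk_sb() can compare the returned csum against it.
	 */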
/*
 * Check one RAID superblock for generic plausibility
 */

static int check_disk_sb(mdk_rdev_t * rdev)
{

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(BAD_MAGIC, partition_name(rdev->dev));

	if (sb->md_minor >= MAX_MD_DEVS) {
		printk(BAD_MINOR, partition_name(rdev->dev), sb->md_minor);

	if (calc_sb_csum(sb) != sb->sb_csum) {
		printk(BAD_CSUM, partition_name(rdev->dev));

static kdev_t dev_unit(kdev_t dev)
{

	struct gendisk *hd = get_gendisk(dev);

	mask = ~((1 << hd->minor_shift) - 1);

	return MKDEV(MAJOR(dev), MINOR(dev) & mask);

static mdk_rdev_t * match_dev_unit(mddev_t *mddev, kdev_t dev)
{
	struct md_list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp)
		if (dev_unit(rdev->dev) == dev_unit(dev))

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	struct md_list_head *tmp;

	ITERATE_RDEV(mddev1,rdev,tmp)
		if (match_dev_unit(mddev2, rdev->dev))

static MD_LIST_HEAD(all_raid_disks);
static MD_LIST_HEAD(pending_raid_disks);

static void bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	mdk_rdev_t *same_pdev;

	same_pdev = match_dev_unit(mddev, rdev->dev);

		"md%d: WARNING: %s appears to be on the same physical disk as %s. True\n"
		"     protection against single-disk failure might be compromised.\n",
		mdidx(mddev), partition_name(rdev->dev),
		partition_name(same_pdev->dev));

	md_list_add(&rdev->same_set, &mddev->disks);

	printk(KERN_INFO "md: bind<%s>\n", partition_name(rdev->dev));

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{

	list_del_init(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", partition_name(rdev->dev));

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by opening the device. [simply getting an
 * inode is not enough, the SCSI module usage code needs
 * an explicit open() on the device]
 */
static int lock_rdev(mdk_rdev_t *rdev)
{

	struct block_device *bdev;

	bdev = bdget(rdev->dev);

	err = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_RAW);

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;

	blkdev_put(bdev, BDEV_RAW);

void md_autodetect_dev(kdev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	printk(KERN_INFO "md: export_rdev(%s)\n",partition_name(rdev->dev));

	list_del_init(&rdev->all);
	if (!list_empty(&rdev->pending)) {
		printk(KERN_INFO "md: (%s was pending)\n",
		       partition_name(rdev->dev));
		list_del_init(&rdev->pending);

	md_autodetect_dev(rdev->dev);

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);

static void export_array(mddev_t *mddev)
{
	struct md_list_head *tmp;

	mdp_super_t *sb = mddev->sb;

		free_page((unsigned long) sb);

	ITERATE_RDEV(mddev,rdev,tmp) {

		kick_rdev_from_array(rdev);

	if (!list_empty(&mddev->disks))

static void free_mddev(mddev_t *mddev)
{

	md_size[mdidx(mddev)] = 0;
	md_hd_struct[mdidx(mddev)].nr_sects = 0;

	/*
	 * Make sure nobody else is using this mddev
	 * (careful, we rely on the global kernel lock here)
	 */
	while (sem_getcount(&mddev->resync_sem) != 1)

	while (sem_getcount(&mddev->recovery_sem) != 1)

	del_mddev_mapping(mddev, mk_kdev(MD_MAJOR, mdidx(mddev)));
	md_list_del(&mddev->all_mddevs);

static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,%s(%d,%d),R:%d,S:%d>\n", desc->number,
		partition_name(MKDEV(desc->major,desc->minor)),
		desc->major,desc->minor,desc->raid_disk,desc->state);

static void print_sb(mdp_super_t *sb)
{

	printk(KERN_INFO "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,

	printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", sb->level,
		sb->size, sb->nr_disks, sb->raid_disks, sb->md_minor,
		sb->layout, sb->chunk_size);
	printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk(" D %2d: ", i);

	printk(KERN_INFO "md: THIS: ");
	print_desc(&sb->this_disk);

static void print_rdev(mdk_rdev_t *rdev)
{
	printk(KERN_INFO "md: rdev %s: O:%s, SZ:%08ld F:%d DN:%d ",
		partition_name(rdev->dev), partition_name(rdev->old_dev),
		rdev->size, rdev->faulty, rdev->desc_nr);

		printk(KERN_INFO "md: rdev superblock:\n");

		printk(KERN_INFO "md: no rdev superblock!\n");

void md_print_devices(void)
{
	struct md_list_head *tmp, *tmp2;

	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	ITERATE_MDDEV(mddev,tmp) {
		printk("md%d: ", mdidx(mddev));

		ITERATE_RDEV(mddev,rdev,tmp2)
			printk("<%s>", partition_name(rdev->dev));

			printk(" array superblock:\n");

			printk(" no array superblock.\n");

		ITERATE_RDEV(mddev,rdev,tmp2)

	printk("md: **********************************\n");

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{

	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {

		printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");

	/*
	 * nr_disks is not constant
	 */

	if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))

static int uuid_equal(mdk_rdev_t *rdev1, mdk_rdev_t *rdev2)
{
	if ( (rdev1->sb->set_uuid0 == rdev2->sb->set_uuid0) &&
	     (rdev1->sb->set_uuid1 == rdev2->sb->set_uuid1) &&
	     (rdev1->sb->set_uuid2 == rdev2->sb->set_uuid2) &&
	     (rdev1->sb->set_uuid3 == rdev2->sb->set_uuid3))
static mdk_rdev_t * find_rdev_all(kdev_t dev)
{
	struct md_list_head *tmp;

	list_for_each(tmp, &all_raid_disks) {
		rdev = md_list_entry(tmp, mdk_rdev_t, all);
		if (rdev->dev == dev)

#define GETBLK_FAILED KERN_ERR \
"md: getblk failed for device %s\n"

static int write_disk_sb(mdk_rdev_t * rdev)
{

	unsigned long sb_offset, size;

	if (rdev->sb->md_magic != MD_SB_MAGIC) {

	sb_offset = calc_dev_sboffset(dev, rdev->mddev, 1);
	if (rdev->sb_offset != sb_offset) {
		printk(KERN_INFO "%s's sb offset has changed from %ld to %ld, skipping\n",
		       partition_name(dev), rdev->sb_offset, sb_offset);

	/*
	 * If the disk went offline meanwhile and it's just a spare, then
	 * its size has changed to zero silently, and the MD code does
	 * not yet know that it's faulty.
	 */
	size = calc_dev_size(dev, rdev->mddev, 1);
	if (size != rdev->size) {
		printk(KERN_INFO "%s's size has changed from %ld to %ld since import, skipping\n",
		       partition_name(dev), rdev->size, size);

	printk(KERN_INFO "(write) %s's sb offset: %ld\n", partition_name(dev), sb_offset);

	if (!sync_page_io(dev, sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE)) {
		printk("md: write_disk_sb failed for device %s\n", partition_name(dev));

static void set_this_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{

	for (i = 0; i < MD_SB_DISKS; i++) {
		desc = mddev->sb->disks + i;

		if (disk_faulty(desc)) {
			if (MKDEV(desc->major,desc->minor) == rdev->dev)

		if (MKDEV(desc->major,desc->minor) == rdev->dev) {
			rdev->sb->this_disk = *desc;
			rdev->desc_nr = desc->number;

static int sync_sbs(mddev_t * mddev)
{

	struct md_list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->faulty || rdev->alias_device)

		set_this_disk(mddev, rdev);
		sb->sb_csum = calc_sb_csum(sb);

int md_update_sb(mddev_t * mddev)
{
	int err, count = 100;
	struct md_list_head *tmp;

	if (!mddev->sb_dirty) {
		printk("hm, md_update_sb() called without ->sb_dirty == 1, from %p.\n", __builtin_return_address(0));

	mddev->sb->utime = CURRENT_TIME;
	if ((++mddev->sb->events_lo)==0)
		++mddev->sb->events_hi;

	if ((mddev->sb->events_lo|mddev->sb->events_hi)==0) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */

		mddev->sb->events_lo = mddev->sb->events_hi = 0xffffffff;
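		/*
		 * Back-of-the-envelope check of the comment above: the
		 * event count is a 64-bit value split into events_lo and
		 * events_hi, so it wraps after 2^64 ~= 1.8e19 increments.
		 * At one superblock update per second that is ~5.8e11
		 * years; pinning both halves to 0xffffffff on wrap keeps
		 * the "freshest superblock" comparison in analyze_sbs()
		 * sane.
		 */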
	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (mddev->sb->not_persistent)

	printk(KERN_INFO "md: updating md%d RAID superblock on device\n",

	ITERATE_RDEV(mddev,rdev,tmp) {
		printk(KERN_INFO "md: ");

			printk("(skipping faulty ");
		if (rdev->alias_device)
			printk("(skipping alias ");
		if (!rdev->faulty && disk_faulty(&rdev->sb->this_disk)) {
			printk("(skipping new-faulty %s )\n",
			       partition_name(rdev->dev));

		printk("%s ", partition_name(rdev->dev));
		if (!rdev->faulty && !rdev->alias_device) {
			printk("[events: %08lx]",
				(unsigned long)rdev->sb->events_lo);
			err += write_disk_sb(rdev);

		printk(KERN_ERR "md: errors occurred during superblock update, repeating\n");

	printk(KERN_ERR "md: excessive errors occurred during superblock update, exiting\n");

/*
 * Import a device. If 'on_disk', then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 */
static int md_import_device(kdev_t newdev, int on_disk)
{

	if (find_rdev_all(newdev))

	rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);

		printk(KERN_ERR "md: could not alloc mem for %s!\n", partition_name(newdev));

	memset(rdev, 0, sizeof(*rdev));

	if (is_mounted(newdev)) {
		printk(KERN_WARNING "md: can not import %s, has active inodes!\n",
		       partition_name(newdev));

	if ((err = alloc_disk_sb(rdev)))

	if (lock_rdev(rdev)) {
		printk(KERN_ERR "md: could not lock %s, zero-size? Marking faulty.\n",
		       partition_name(newdev));

	if (blk_size[MAJOR(newdev)])
		size = blk_size[MAJOR(newdev)][MINOR(newdev)];

		printk(KERN_WARNING "md: %s has zero size, marking faulty!\n",
		       partition_name(newdev));

	if ((err = read_disk_sb(rdev))) {
		printk(KERN_WARNING "md: could not read %s's sb, not importing!\n",
		       partition_name(newdev));

	if ((err = check_disk_sb(rdev))) {
		printk(KERN_WARNING "md: %s has invalid sb, not importing!\n",
		       partition_name(newdev));

	if (rdev->sb->level != -4) {
		rdev->old_dev = MKDEV(rdev->sb->this_disk.major,
				      rdev->sb->this_disk.minor);
		rdev->desc_nr = rdev->sb->this_disk.number;

		rdev->old_dev = MKDEV(0, 0);

	md_list_add(&rdev->all, &all_raid_disks);
	MD_INIT_LIST_HEAD(&rdev->pending);
	INIT_LIST_HEAD(&rdev->same_set);

/*
 * Check a full RAID array for plausibility
 */

#define INCONSISTENT KERN_ERR \
"md: fatal superblock inconsistency in %s -- removing from array\n"

#define OUT_OF_DATE KERN_ERR \
"md: superblock update time inconsistency -- using the most recent one\n"

#define OLD_VERSION KERN_ALERT \
"md: md%d: unsupported raid array version %d.%d.%d\n"

#define NOT_CLEAN_IGNORE KERN_ERR \
"md: md%d: raid array is not clean -- starting background reconstruction\n"

#define UNKNOWN_LEVEL KERN_ERR \
"md: md%d: unsupported raid level %d\n"

static int analyze_sbs(mddev_t * mddev)
{
	int out_of_date = 0, i, first;
	struct md_list_head *tmp, *tmp2;
	mdk_rdev_t *rdev, *rdev2, *freshest;

	/*
	 * Verify the RAID superblock on each real device
	 */
	ITERATE_RDEV(mddev,rdev,tmp) {

		if (check_disk_sb(rdev))

	/*
	 * The superblock constant part has to be the same
	 * for all disks in the array.
	 */

	ITERATE_RDEV(mddev,rdev,tmp) {

		if (!sb_equal(sb, rdev->sb)) {
			printk(INCONSISTENT, partition_name(rdev->dev));
			kick_rdev_from_array(rdev);

	/*
	 * OK, we have all disks and the array is ready to run. Let's
	 * find the freshest superblock, that one will be the superblock
	 * that represents the whole array.
	 */

	if (alloc_array_sb(mddev))

	ITERATE_RDEV(mddev,rdev,tmp) {

		/*
		 * if the checksum is invalid, use the superblock
		 * only as a last resort. (decrease its age by
		 * one event)
		 */
		if (calc_sb_csum(rdev->sb) != rdev->sb->sb_csum) {
			if (rdev->sb->events_lo || rdev->sb->events_hi)
				if ((rdev->sb->events_lo--)==0)
					rdev->sb->events_hi--;

		printk(KERN_INFO "md: %s's event counter: %08lx\n",
		       partition_name(rdev->dev),
		       (unsigned long)rdev->sb->events_lo);

		/*
		 * Find the newest superblock version
		 */
		ev1 = md_event(rdev->sb);
		ev2 = md_event(freshest->sb);

		printk(OUT_OF_DATE);
		printk(KERN_INFO "md: freshest: %s\n", partition_name(freshest->dev));

	memcpy (sb, freshest->sb, sizeof(*sb));

	/*
	 * at this point we have picked the 'best' superblock
	 * from all available superblocks.
	 * now we validate this superblock and kick out possibly
	 * stale devices.
	 */
	ITERATE_RDEV(mddev,rdev,tmp) {
		/*
		 * Kick all non-fresh devices
		 */

		ev1 = md_event(rdev->sb);

			printk(KERN_WARNING "md: kicking non-fresh %s from array!\n",
			       partition_name(rdev->dev));
			kick_rdev_from_array(rdev);

	/*
	 * Fix up changed device names ... but only if this disk has a
	 * recent update time. Use faulty checksum ones too.
	 */
	if (mddev->sb->level != -4)
		ITERATE_RDEV(mddev,rdev,tmp) {
			__u64 ev1, ev2, ev3;
			if (rdev->faulty || rdev->alias_device) {

			ev1 = md_event(rdev->sb);

			if ((rdev->dev != rdev->old_dev) &&
			    ((ev1 == ev2) || (ev1 == ev3))) {

				printk(KERN_WARNING "md: device name has changed from %s to %s since last import!\n",
				       partition_name(rdev->old_dev), partition_name(rdev->dev));
				if (rdev->desc_nr == -1) {

				desc = &sb->disks[rdev->desc_nr];
				if (rdev->old_dev != MKDEV(desc->major, desc->minor)) {

				desc->major = MAJOR(rdev->dev);
				desc->minor = MINOR(rdev->dev);
				desc = &rdev->sb->this_disk;
				desc->major = MAJOR(rdev->dev);
				desc->minor = MINOR(rdev->dev);

	/*
	 * Remove unavailable and faulty devices ...
	 *
	 * note that if an array becomes completely unrunnable due to
	 * missing devices, we do not write the superblock back, so the
	 * administrator has a chance to fix things up. The removal thus
	 * only happens if it's nonfatal to the contents of the array.
	 */
	for (i = 0; i < MD_SB_DISKS; i++) {

		desc = sb->disks + i;
		dev = MKDEV(desc->major, desc->minor);

		/*
		 * We kick faulty devices/descriptors immediately.
		 *
		 * Note: multipath devices are a special case. Since we
		 * were able to read the superblock on the path, we don't
		 * care if it was previously marked as faulty, it's up now
		 */
		if (disk_faulty(desc) && mddev->sb->level != -4) {

			ITERATE_RDEV(mddev,rdev,tmp) {
				if (rdev->desc_nr != desc->number)

				printk(KERN_WARNING "md%d: kicking faulty %s!\n",
				       mdidx(mddev),partition_name(rdev->dev));
				kick_rdev_from_array(rdev);

			if (dev == MKDEV(0,0))

			printk(KERN_WARNING "md%d: removing former faulty %s!\n",
			       mdidx(mddev), partition_name(dev));

			remove_descriptor(desc, sb);

		} else if (disk_faulty(desc)) {
			/*
			 * multipath entry marked as faulty, unfaulty it
			 */
			rdev = find_rdev(mddev, dev);

				mark_disk_spare(desc);

				remove_descriptor(desc, sb);

		if (dev == MKDEV(0,0))

		/*
		 * Is this device present in the rdev ring?
		 */

		ITERATE_RDEV(mddev,rdev,tmp) {
			/*
			 * Multi-path IO special-case: since we have no
			 * this_disk descriptor at auto-detect time,
			 * we cannot check rdev->number.
			 * We can check the device though.
			 */
			if ((sb->level == -4) && (rdev->dev ==
			    MKDEV(desc->major,desc->minor))) {

			if (rdev->desc_nr == desc->number) {

		printk(KERN_WARNING "md%d: former device %s is unavailable, removing from array!\n",
		       mdidx(mddev), partition_name(dev));
		remove_descriptor(desc, sb);
	/*
	 * Double check whether all devices mentioned in the
	 * superblock are in the rdev ring.
	 */

	for (i = 0; i < MD_SB_DISKS; i++) {

		desc = sb->disks + i;
		dev = MKDEV(desc->major, desc->minor);

		if (dev == MKDEV(0,0))

		if (disk_faulty(desc)) {

		rdev = find_rdev(mddev, dev);

		/*
		 * In the case of Multipath-IO, we have no
		 * other information source to find out which
		 * disk is which, only the position of the device
		 * in the superblock:
		 */
		if (mddev->sb->level == -4) {
			if ((rdev->desc_nr != -1) && (rdev->desc_nr != i)) {

			rdev->alias_device = 1;

	/*
	 * Kick all rdevs that are not in the
	 * superblock:
	 */
	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->desc_nr == -1)
			kick_rdev_from_array(rdev);

	/*
	 * Do a final reality check.
	 */
	if (mddev->sb->level != -4) {
		ITERATE_RDEV(mddev,rdev,tmp) {
			if (rdev->desc_nr == -1) {

			/*
			 * is the desc_nr unique?
			 */
			ITERATE_RDEV(mddev,rdev2,tmp2) {
				if ((rdev2 != rdev) &&
				    (rdev2->desc_nr == rdev->desc_nr)) {

			/*
			 * is the device unique?
			 */
			ITERATE_RDEV(mddev,rdev2,tmp2) {
				if ((rdev2 != rdev) &&
				    (rdev2->dev == rdev->dev)) {

	/*
	 * Check if we can support this RAID array
	 */
	if (sb->major_version != MD_MAJOR_VERSION ||
	    sb->minor_version > MD_MINOR_VERSION) {

		printk(OLD_VERSION, mdidx(mddev), sb->major_version,
		       sb->minor_version, sb->patch_version);

	if ((sb->state != (1 << MD_SB_CLEAN)) && ((sb->level == 1) ||
	    (sb->level == 4) || (sb->level == 5)))
		printk(NOT_CLEAN_IGNORE, mdidx(mddev));
static int device_size_calculation(mddev_t * mddev)
{
	int data_disks = 0, persistent;
	unsigned int readahead;
	mdp_super_t *sb = mddev->sb;
	struct md_list_head *tmp;

	/*
	 * Do device size calculation. Bail out if too small.
	 * (we have to do this after having validated chunk_size,
	 * because device size has to be a multiple of chunk_size)
	 */
	persistent = !mddev->sb->not_persistent;
	ITERATE_RDEV(mddev,rdev,tmp) {

		rdev->size = calc_dev_size(rdev->dev, mddev, persistent);
		if (rdev->size < sb->chunk_size / 1024) {
			printk(KERN_WARNING
				"md: Dev %s smaller than chunk_size: %ldk < %dk\n",
				partition_name(rdev->dev),
				rdev->size, sb->chunk_size / 1024);

	switch (sb->level) {

			zoned_raid_size(mddev);

			zoned_raid_size(mddev);
			data_disks = sb->raid_disks;

			data_disks = sb->raid_disks-1;

			printk(UNKNOWN_LEVEL, mdidx(mddev), sb->level);

	if (!md_size[mdidx(mddev)])
		md_size[mdidx(mddev)] = sb->size * data_disks;

	readahead = MD_READAHEAD;
	if ((sb->level == 0) || (sb->level == 4) || (sb->level == 5)) {
		readahead = (mddev->sb->chunk_size>>PAGE_SHIFT) * 4 * data_disks;
		if (readahead < data_disks * (MAX_SECTORS>>(PAGE_SHIFT-9))*2)
			readahead = data_disks * (MAX_SECTORS>>(PAGE_SHIFT-9))*2;

		// (no multipath branch - it uses the default setting)
		if (sb->level == -3)

	printk(KERN_INFO "md%d: max total readahead window set to %ldk\n",
	       mdidx(mddev), readahead*(PAGE_SIZE/1024));

	printk(KERN_INFO
		"md%d: %d data-disks, max readahead per data-disk: %ldk\n",
		mdidx(mddev), data_disks, readahead/data_disks*(PAGE_SIZE/1024));
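	/*
	 * Illustrative numbers for the readahead formula above, assuming
	 * PAGE_SIZE = 4K and MAX_SECTORS = 255: a RAID5 array with
	 * chunk_size = 64K and 4 data disks gets
	 * (65536>>12) * 4 * 4 = 256 pages (reported as 1024k above),
	 * while the floor of data_disks * (255>>3) * 2 = 248 pages does
	 * not kick in. Small chunk sizes are what make the floor matter.
	 */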
#define TOO_BIG_CHUNKSIZE KERN_ERR \
"too big chunk_size: %d > %d\n"

#define TOO_SMALL_CHUNKSIZE KERN_ERR \
"too small chunk_size: %d < %ld\n"

#define BAD_CHUNKSIZE KERN_ERR \
"no chunksize specified, see 'man raidtab'\n"

static int do_md_run(mddev_t * mddev)
{

	struct md_list_head *tmp;

	if (list_empty(&mddev->disks)) {

	/*
	 * Resize disks to align partitions size on a given
	 * chunk size.
	 */
	md_size[mdidx(mddev)] = 0;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (analyze_sbs(mddev)) {

	chunk_size = mddev->sb->chunk_size;
	pnum = level_to_pers(mddev->sb->level);

	if ((pnum != MULTIPATH) && (pnum != RAID1)) {

		/*
		 * 'default chunksize' in the old md code used to
		 * be PAGE_SIZE, baaad.
		 * we abort here to be on the safe side. We don't
		 * want to continue the bad practice.
		 */
		printk(BAD_CHUNKSIZE);

	if (chunk_size > MAX_CHUNK_SIZE) {
		printk(TOO_BIG_CHUNKSIZE, chunk_size, MAX_CHUNK_SIZE);

	/*
	 * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
	 */
	if ( (1 << ffz(~chunk_size)) != chunk_size) {

	if (chunk_size < PAGE_SIZE) {
		printk(TOO_SMALL_CHUNKSIZE, chunk_size, PAGE_SIZE);

		printk(KERN_INFO "md: RAID level %d does not need chunksize! Continuing anyway.\n",

	if (pnum >= MAX_PERSONALITY) {

		char module_name[80];
		sprintf (module_name, "md-personality-%d", pnum);
		request_module (module_name);

		printk(KERN_ERR "md: personality %d is not loaded!\n",

	if (device_size_calculation(mddev))

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 * Also find largest hardsector size
	 */
	md_hardsect_sizes[mdidx(mddev)] = 512;
	ITERATE_RDEV(mddev,rdev,tmp) {

		invalidate_device(rdev->dev, 1);
		if (get_hardsect_size(rdev->dev)
		    > md_hardsect_sizes[mdidx(mddev)])
			md_hardsect_sizes[mdidx(mddev)] =
				get_hardsect_size(rdev->dev);

	md_blocksizes[mdidx(mddev)] = 1024;
	if (md_blocksizes[mdidx(mddev)] < md_hardsect_sizes[mdidx(mddev)])
		md_blocksizes[mdidx(mddev)] = md_hardsect_sizes[mdidx(mddev)];
	mddev->pers = pers[pnum];

	blk_queue_make_request(&mddev->queue, mddev->pers->make_request);
	mddev->queue.queuedata = mddev;

	err = mddev->pers->run(mddev);

		printk(KERN_ERR "md: pers->run() failed ...\n");

	mddev->sb->state &= ~(1 << MD_SB_CLEAN);
	mddev->sb_dirty = 1;
	md_update_sb(mddev);

	/*
	 * md_size has units of 1K blocks, which are
	 * twice as large as sectors.
	 */
	md_hd_struct[mdidx(mddev)].start_sect = 0;
	register_disk(&md_gendisk, MKDEV(MAJOR_NR,mdidx(mddev)),
			1, &md_fops, md_size[mdidx(mddev)]<<1);

	read_ahead[MD_MAJOR] = 1024;

#undef TOO_BIG_CHUNKSIZE
#undef BAD_CHUNKSIZE
static int restart_array(mddev_t *mddev)
{

	/*
	 * Complain if it has no devices
	 */

	if (list_empty(&mddev->disks))

		set_device_ro(mddev_to_kdev(mddev), 0);

			"md: md%d switched to read-write mode.\n", mdidx(mddev));
		/*
		 * Kick recovery or resync if necessary
		 */
		md_recover_arrays();
		if (mddev->pers->restart_resync)
			mddev->pers->restart_resync(mddev);

		printk(KERN_ERR "md: md%d has no personality assigned.\n",

#define STILL_MOUNTED KERN_WARNING \
"md: md%d still mounted.\n"
#define STILL_IN_USE \
"md: md%d still in use.\n"

static int do_md_stop(mddev_t * mddev, int ro)
{
	int err = 0, resync_interrupted = 0;
	kdev_t dev = mddev_to_kdev(mddev);

	if (atomic_read(&mddev->active)>1) {
		printk(STILL_IN_USE, mdidx(mddev));

		/*
		 * It is safe to call stop here, it only frees private
		 * data. Also, it tells us if a device is unstoppable
		 * (e.g. resyncing is in progress)
		 */
		if (mddev->pers->stop_resync)
			if (mddev->pers->stop_resync(mddev))
				resync_interrupted = 1;

		if (mddev->recovery_running)
			md_interrupt_thread(md_recovery_thread);

		/*
		 * This synchronizes with signal delivery to the
		 * resync or reconstruction thread. It also nicely
		 * hangs the process if some reconstruction has not
		 * finished.
		 */
		down(&mddev->recovery_sem);
		up(&mddev->recovery_sem);

		invalidate_device(dev, 1);

			set_device_ro(dev, 0);
			if (mddev->pers->stop(mddev)) {

				set_device_ro(dev, 1);

			/*
			 * mark it clean only if there was no resync
			 * interrupted.
			 */
			if (!mddev->recovery_running && !resync_interrupted) {
				printk(KERN_INFO "md: marking sb clean...\n");
				mddev->sb->state |= 1 << MD_SB_CLEAN;

			mddev->sb_dirty = 1;
			md_update_sb(mddev);

			set_device_ro(dev, 1);

	/*
	 * Free resources if final stop
	 */

		printk(KERN_INFO "md: md%d stopped.\n", mdidx(mddev));

		printk(KERN_INFO "md: md%d switched to read-only mode.\n", mdidx(mddev));
/*
 * We have to safely support old arrays too.
 */
int detect_old_array(mdp_super_t *sb)
{
	if (sb->major_version > 0)

	if (sb->minor_version >= 90)

static void autorun_array(mddev_t *mddev)
{

	struct md_list_head *tmp;

	if (list_empty(&mddev->disks)) {

	printk(KERN_INFO "md: running: ");

	ITERATE_RDEV(mddev,rdev,tmp) {
		printk("<%s>", partition_name(rdev->dev));

	err = do_md_run (mddev);

		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
		/*
		 * prevent the writeback of an unrunnable array
		 */
		mddev->sb_dirty = 0;
		do_md_stop (mddev, 0);

/*
 * let's try to run arrays based on all disks that have arrived
 * until now. (those are in the ->pending list)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(kdev_t countdev)
{
	struct md_list_head candidates;
	struct md_list_head *tmp;
	mdk_rdev_t *rdev0, *rdev;

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		rdev0 = md_list_entry(pending_raid_disks.next,
					 mdk_rdev_t, pending);

		printk(KERN_INFO "md: considering %s ...\n", partition_name(rdev0->dev));
		MD_INIT_LIST_HEAD(&candidates);
		ITERATE_RDEV_PENDING(rdev,tmp) {
			if (uuid_equal(rdev0, rdev)) {
				if (!sb_equal(rdev0->sb, rdev->sb)) {

					"md: %s has same UUID as %s, but superblocks differ ...\n",
					partition_name(rdev->dev), partition_name(rdev0->dev));

				printk(KERN_INFO "md: adding %s ...\n", partition_name(rdev->dev));
				md_list_del(&rdev->pending);
				md_list_add(&rdev->pending, &candidates);

		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		md_kdev = MKDEV(MD_MAJOR, rdev0->sb->md_minor);
		mddev = kdev_to_mddev(md_kdev);

			printk(KERN_WARNING "md: md%d already running, cannot run %s\n",
			       mdidx(mddev), partition_name(rdev0->dev));
			ITERATE_RDEV_GENERIC(candidates,pending,rdev,tmp)

		mddev = alloc_mddev(md_kdev);

			printk(KERN_ERR "md: cannot allocate memory for md drive.\n");

		if (md_kdev == countdev)
			atomic_inc(&mddev->active);
		printk(KERN_INFO "md: created md%d\n", mdidx(mddev));
		ITERATE_RDEV_GENERIC(candidates,pending,rdev,tmp) {
			bind_rdev_to_array(rdev, mddev);
			list_del_init(&rdev->pending);

		autorun_array(mddev);

	printk(KERN_INFO "md: ... autorun DONE.\n");
/*
 * import RAID devices based on one partition;
 * if possible, the array gets run as well.
 */

#define BAD_VERSION KERN_ERR \
"md: %s has RAID superblock version 0.%d, autodetect needs v0.90 or higher\n"

#define OUT_OF_MEM KERN_ALERT \
"md: out of memory.\n"

#define NO_DEVICE KERN_ERR \
"md: disabled device %s\n"

#define AUTOADD_FAILED KERN_ERR \
"md: auto-adding devices to md%d FAILED (error %d).\n"

#define AUTOADD_FAILED_USED KERN_ERR \
"md: cannot auto-add device %s to md%d, already used.\n"

#define AUTORUN_FAILED KERN_ERR \
"md: auto-running md%d FAILED (error %d).\n"

#define MDDEV_BUSY KERN_ERR \
"md: cannot auto-add to md%d, already running.\n"

#define AUTOADDING KERN_INFO \
"md: auto-adding devices to md%d, based on %s's superblock.\n"

#define AUTORUNNING KERN_INFO \
"md: auto-running md%d.\n"

static int autostart_array(kdev_t startdev, kdev_t countdev)
{
	int err = -EINVAL, i;
	mdp_super_t *sb = NULL;
	mdk_rdev_t *start_rdev = NULL, *rdev;

	if (md_import_device(startdev, 1)) {
		printk(KERN_WARNING "md: could not import %s!\n", partition_name(startdev));

	start_rdev = find_rdev_all(startdev);

	if (start_rdev->faulty) {
		printk(KERN_WARNING "md: can not autostart based on faulty %s!\n",
		       partition_name(startdev));

	md_list_add(&start_rdev->pending, &pending_raid_disks);

	sb = start_rdev->sb;

	err = detect_old_array(sb);

		printk(KERN_WARNING "md: array version is too old to be autostarted, "
		       "use raidtools 0.90 mkraid --upgrade to upgrade the array "
		       "without data loss!\n");

	for (i = 0; i < MD_SB_DISKS; i++) {

		desc = sb->disks + i;
		dev = MKDEV(desc->major, desc->minor);

		if (dev == MKDEV(0,0))

		if (dev == startdev)

		if (md_import_device(dev, 1)) {
			printk(KERN_WARNING "md: could not import %s, trying to run array nevertheless.\n",
			       partition_name(dev));

		rdev = find_rdev_all(dev);

		md_list_add(&rdev->pending, &pending_raid_disks);

	/*
	 * possibly return codes
	 */
	autorun_devices(countdev);

	export_rdev(start_rdev);

#undef AUTOADD_FAILED_USED
#undef AUTOADD_FAILED
#undef AUTORUN_FAILED
static int get_version(void * arg)
{

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (md_copy_to_user(arg, &ver, sizeof(ver)))

#define SET_FROM_SB(x) info.x = mddev->sb->x
static int get_array_info(mddev_t * mddev, void * arg)
{
	mdu_array_info_t info;

	SET_FROM_SB(major_version);
	SET_FROM_SB(minor_version);
	SET_FROM_SB(patch_version);

	SET_FROM_SB(nr_disks);
	SET_FROM_SB(raid_disks);
	SET_FROM_SB(md_minor);
	SET_FROM_SB(not_persistent);

	SET_FROM_SB(active_disks);
	SET_FROM_SB(working_disks);
	SET_FROM_SB(failed_disks);
	SET_FROM_SB(spare_disks);

	SET_FROM_SB(layout);
	SET_FROM_SB(chunk_size);

	if (md_copy_to_user(arg, &info, sizeof(info)))

#define SET_FROM_SB(x) info.x = mddev->sb->disks[nr].x
static int get_disk_info(mddev_t * mddev, void * arg)
{
	mdu_disk_info_t info;

	if (md_copy_from_user(&info, arg, sizeof(info)))

	if (nr >= MD_SB_DISKS)

	SET_FROM_SB(raid_disk);

	if (md_copy_to_user(arg, &info, sizeof(info)))

#define SET_SB(x) mddev->sb->disks[nr].x = info->x

static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
{
	int err, size, persistent;

	dev = MKDEV(info->major,info->minor);

	if (find_rdev_all(dev)) {
		printk(KERN_WARNING "md: device %s already used in a RAID array!\n",
		       partition_name(dev));

	/* expecting a device which has a superblock */
	err = md_import_device(dev, 1);

		printk(KERN_WARNING "md: md_import_device returned %d\n", err);

	rdev = find_rdev_all(dev);

	if (!list_empty(&mddev->disks)) {
		mdk_rdev_t *rdev0 = md_list_entry(mddev->disks.next,
						  mdk_rdev_t, same_set);
		if (!uuid_equal(rdev0, rdev)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
			       partition_name(rdev->dev), partition_name(rdev0->dev));

		if (!sb_equal(rdev0->sb, rdev->sb)) {
			printk(KERN_WARNING "md: %s has same UUID but different superblock to %s\n",
			       partition_name(rdev->dev), partition_name(rdev0->dev));

	bind_rdev_to_array(rdev, mddev);

	if (nr >= mddev->sb->nr_disks) {

	if ((info->state & (1<<MD_DISK_FAULTY))==0) {
		err = md_import_device (dev, 0);

			printk(KERN_WARNING "md: error, md_import_device() returned %d\n", err);

		rdev = find_rdev_all(dev);

		rdev->old_dev = dev;
		rdev->desc_nr = info->number;

		bind_rdev_to_array(rdev, mddev);

		persistent = !mddev->sb->not_persistent;

			printk(KERN_INFO "md: nonpersistent superblock ...\n");

		size = calc_dev_size(dev, mddev, persistent);
		rdev->sb_offset = calc_dev_sboffset(dev, mddev, persistent);

		if (!mddev->sb->size || (mddev->sb->size > size))
			mddev->sb->size = size;

	/*
	 * sync all other superblocks with the main superblock
	 */
static int hot_generate_error(mddev_t * mddev, kdev_t dev)
{
	struct request_queue *q;

	printk(KERN_INFO "md: trying to generate %s error in md%d ... \n",
	       partition_name(dev), mdidx(mddev));

	rdev = find_rdev(mddev, dev);

	if (rdev->desc_nr == -1) {

	disk = &mddev->sb->disks[rdev->desc_nr];
	if (!disk_active(disk))

	q = blk_get_queue(rdev->dev);

	printk(KERN_INFO "md: okay, generating error!\n");
	// q->oneshot_error = 1; // disabled for now

static int hot_remove_disk(mddev_t * mddev, kdev_t dev)
{

	printk(KERN_INFO "md: trying to remove %s from md%d ... \n",
	       partition_name(dev), mdidx(mddev));

	if (!mddev->pers->diskop) {
		printk(KERN_WARNING "md%d: personality does not support diskops!\n",

	rdev = find_rdev(mddev, dev);

	if (rdev->desc_nr == -1) {

	disk = &mddev->sb->disks[rdev->desc_nr];
	if (disk_active(disk))

	if (disk_removed(disk))

	err = mddev->pers->diskop(mddev, &disk, DISKOP_HOT_REMOVE_DISK);

	remove_descriptor(disk, mddev->sb);
	kick_rdev_from_array(rdev);
	mddev->sb_dirty = 1;
	md_update_sb(mddev);

	printk(KERN_WARNING "md: cannot remove active disk %s from md%d ... \n",
	       partition_name(dev), mdidx(mddev));

static int hot_add_disk(mddev_t * mddev, kdev_t dev)
{
	int i, err, persistent;

	printk(KERN_INFO "md: trying to hot-add %s to md%d ... \n",
	       partition_name(dev), mdidx(mddev));

	if (!mddev->pers->diskop) {
		printk(KERN_WARNING "md%d: personality does not support diskops!\n",

	persistent = !mddev->sb->not_persistent;

	rdev = find_rdev(mddev, dev);

	err = md_import_device (dev, 0);

		printk(KERN_WARNING "md: error, md_import_device() returned %d\n", err);

	rdev = find_rdev_all(dev);

		printk(KERN_WARNING "md: can not hot-add faulty %s disk to md%d!\n",
		       partition_name(dev), mdidx(mddev));

	size = calc_dev_size(dev, mddev, persistent);

	if (size < mddev->sb->size) {
		printk(KERN_WARNING "md%d: disk size %d blocks < array size %d\n",
		       mdidx(mddev), size, mddev->sb->size);

	bind_rdev_to_array(rdev, mddev);

	/*
	 * The rest had better be atomic, since disk failures can be
	 * noticed in interrupt contexts ...
	 */
	rdev->old_dev = dev;

	rdev->sb_offset = calc_dev_sboffset(dev, mddev, persistent);

	disk = mddev->sb->disks + mddev->sb->raid_disks;
	for (i = mddev->sb->raid_disks; i < MD_SB_DISKS; i++) {
		disk = mddev->sb->disks + i;

		if (!disk->major && !disk->minor)

		if (disk_removed(disk))

	if (i == MD_SB_DISKS) {
		printk(KERN_WARNING "md%d: can not hot-add to full array!\n",

		goto abort_unbind_export;

	if (disk_removed(disk)) {

		if (disk->number != i) {

			goto abort_unbind_export;

	disk->raid_disk = disk->number;
	disk->major = MAJOR(dev);
	disk->minor = MINOR(dev);

	if (mddev->pers->diskop(mddev, &disk, DISKOP_HOT_ADD_DISK)) {

		goto abort_unbind_export;

	mark_disk_spare(disk);
	mddev->sb->nr_disks++;
	mddev->sb->spare_disks++;
	mddev->sb->working_disks++;

	mddev->sb_dirty = 1;
	md_update_sb(mddev);

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	md_recover_arrays();

abort_unbind_export:
	unbind_rdev_from_array(rdev);
#define SET_SB(x) mddev->sb->x = info->x
static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
{

	if (alloc_array_sb(mddev))

	mddev->sb->major_version = MD_MAJOR_VERSION;
	mddev->sb->minor_version = MD_MINOR_VERSION;
	mddev->sb->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->sb->ctime = CURRENT_TIME;

	SET_SB(not_persistent);

	SET_SB(active_disks);
	SET_SB(working_disks);
	SET_SB(failed_disks);
	SET_SB(spare_disks);

	mddev->sb->md_magic = MD_SB_MAGIC;

	/*
	 * Generate a 128-bit UUID
	 */
	get_random_bytes(&mddev->sb->set_uuid0, 4);
	get_random_bytes(&mddev->sb->set_uuid1, 4);
	get_random_bytes(&mddev->sb->set_uuid2, 4);
	get_random_bytes(&mddev->sb->set_uuid3, 4);

static int set_disk_faulty(mddev_t *mddev, kdev_t dev)
{

	ret = md_error(mddev, dev);

static int md_ioctl(struct inode *inode, struct file *file,
		    unsigned int cmd, unsigned long arg)
{

	struct hd_geometry *loc = (struct hd_geometry *) arg;
	mddev_t *mddev = NULL;

	if (!md_capable_admin())

	dev = inode->i_rdev;

	if (minor >= MAX_MD_DEVS) {

	/*
	 * Commands dealing with the RAID driver but not any
	 * particular array:
	 */

		err = get_version((void *)arg);

	case PRINT_RAID_DEBUG:

		err = blk_ioctl (dev, cmd, arg);

	/*
	 * Commands creating/starting a new array:
	 */

	mddev = kdev_to_mddev(dev);

	case SET_ARRAY_INFO:

		printk(KERN_WARNING "md: array md%d already exists!\n",

	case SET_ARRAY_INFO:
		mddev = alloc_mddev(dev);

		atomic_inc(&mddev->active);

		/*
		 * alloc_mddev() should possibly self-lock.
		 */
		err = lock_mddev(mddev);

			printk(KERN_WARNING "md: ioctl, reason %d, cmd %d\n",

			printk(KERN_WARNING "md: array md%d already has a superblock!\n",

			mdu_array_info_t info;
			if (md_copy_from_user(&info, (void*)arg, sizeof(info))) {

			err = set_array_info(mddev, &info);

				printk(KERN_WARNING "md: couldn't set array info. %d\n", err);

		/*
		 * possibly make it lock the array ...
		 */
		err = autostart_array((kdev_t)arg, dev);

			printk(KERN_WARNING "md: autostart %s failed!\n",
			       partition_name((kdev_t)arg));

	/*
	 * Commands querying/configuring an existing array:
	 */

	err = lock_mddev(mddev);

		printk(KERN_INFO "md: ioctl lock interrupted, reason %d, cmd %d\n",err, cmd);

	/*
	 * if we don't have a superblock yet, only ADD_NEW_DISK,
	 * STOP_ARRAY or RUN_ARRAY are allowed
	 */
	if (!mddev->sb && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY && cmd != RUN_ARRAY) {

	/*
	 * Commands even a read-only array can execute:
	 */

	case GET_ARRAY_INFO:
		err = get_array_info(mddev, (void *)arg);

		err = get_disk_info(mddev, (void *)arg);

	case RESTART_ARRAY_RW:
		err = restart_array(mddev);

		if (!(err = do_md_stop (mddev, 0)))

		err = do_md_stop (mddev, 1);

	/*
	 * We have a problem here: there is no easy way to give a CHS
	 * virtual geometry. We currently pretend that we have 2 heads,
	 * 4 sectors (with a BIG number of cylinders...). This drives
	 * dosfs just mad... ;-)
	 */

		err = md_put_user (2, (char *) &loc->heads);

		err = md_put_user (4, (char *) &loc->sectors);

		err = md_put_user (md_hd_struct[mdidx(mddev)].nr_sects/8,
				   (short *) &loc->cylinders);

		err = md_put_user (md_hd_struct[minor].start_sect,
				   (long *) &loc->start);
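		/*
		 * Sanity of the fake geometry, worked through: with 2
		 * heads and 4 sectors per track, one cylinder holds
		 * 2 * 4 = 8 sectors, which is why cylinders is reported
		 * as nr_sects/8. E.g. a 1 GB array (2097152 sectors)
		 * shows up as C/H/S = 262144/2/4.
		 */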
	/*
	 * The remaining ioctls change the state of the superblock,
	 * so we do not allow them on read-only arrays.
	 */

			mdu_disk_info_t info;
			if (md_copy_from_user(&info, (void*)arg, sizeof(info)))

			err = add_new_disk(mddev, &info);

	case HOT_GENERATE_ERROR:
		err = hot_generate_error(mddev, (kdev_t)arg);

	case HOT_REMOVE_DISK:
		err = hot_remove_disk(mddev, (kdev_t)arg);

		err = hot_add_disk(mddev, (kdev_t)arg);

	case SET_DISK_FAULTY:
		err = set_disk_faulty(mddev, (kdev_t)arg);

		err = do_md_run (mddev);
		/*
		 * we have to clean up the mess if
		 * the array cannot be run for some
		 * reason ...
		 */

			mddev->sb_dirty = 0;
			if (!do_md_stop (mddev, 0))

		printk(KERN_WARNING "md: %s(pid %d) used obsolete MD ioctl, "
		       "upgrade your software to use new ioctls.\n",
		       current->comm, current->pid);

	unlock_mddev(mddev);

static int md_open(struct inode *inode, struct file *file)
{
	/*
	 * Always succeed, but increment the usage count
	 */
	mddev_t *mddev = kdev_to_mddev(inode->i_rdev);

	atomic_inc(&mddev->active);

static int md_release(struct inode *inode, struct file * file)
{
	mddev_t *mddev = kdev_to_mddev(inode->i_rdev);

	atomic_dec(&mddev->active);

static struct block_device_operations md_fops =
{
	release:	md_release,

int md_thread(void * arg)
{
	mdk_thread_t *thread = arg;

	snprintf(current->comm, sizeof(current->comm), "%s", thread->name);

	thread->tsk = current;

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */
	current->policy = SCHED_OTHER;
	current->nice = -20;

	complete(thread->event);
	while (thread->run) {
		void (*run)(void *data);

		wait_event_interruptible(thread->wqueue,
					 test_bit(THREAD_WAKEUP, &thread->flags));

		clear_bit(THREAD_WAKEUP, &thread->flags);

		run_task_queue(&tq_disk);

		if (md_signal_pending(current))

	complete(thread->event);

void md_wakeup_thread(mdk_thread_t *thread)
{
	dprintk("md: waking up MD thread %p.\n", thread);
	set_bit(THREAD_WAKEUP, &thread->flags);
	wake_up(&thread->wqueue);
}

mdk_thread_t *md_register_thread(void (*run) (void *),
				 void *data, const char *name)
{
	mdk_thread_t *thread;

	struct completion event;

	thread = (mdk_thread_t *) kmalloc
				(sizeof(mdk_thread_t), GFP_KERNEL);

	memset(thread, 0, sizeof(mdk_thread_t));
	md_init_waitqueue_head(&thread->wqueue);

	init_completion(&event);
	thread->event = &event;

	thread->data = data;
	thread->name = name;
	ret = kernel_thread(md_thread, thread, 0);

	wait_for_completion(&event);
void md_interrupt_thread(mdk_thread_t *thread)
{

	dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
	send_sig(SIGKILL, thread->tsk, 1);

void md_unregister_thread(mdk_thread_t *thread)
{
	struct completion event;

	init_completion(&event);

	thread->event = &event;

	thread->name = NULL;
	md_interrupt_thread(thread);
	wait_for_completion(&event);

void md_recover_arrays(void)
{
	if (!md_recovery_thread) {

	md_wakeup_thread(md_recovery_thread);

int md_error(mddev_t *mddev, kdev_t rdev)
{

	dprintk("md_error dev:(%d:%d), rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
		MD_MAJOR,mdidx(mddev),MAJOR(rdev),MINOR(rdev),
		__builtin_return_address(0),__builtin_return_address(1),
		__builtin_return_address(2),__builtin_return_address(3));

	rrdev = find_rdev(mddev, rdev);
	if (!rrdev || rrdev->faulty)

	if (!mddev->pers->error_handler
	    || mddev->pers->error_handler(mddev,rdev) <= 0) {

	/*
	 * if recovery was running, stop it now.
	 */
	if (mddev->pers->stop_resync)
		mddev->pers->stop_resync(mddev);
	if (mddev->recovery_running)
		md_interrupt_thread(md_recovery_thread);
	md_recover_arrays();

static void status_unused(struct seq_file *seq)
{

	struct md_list_head *tmp;

	seq_printf(seq, "unused devices: ");

	ITERATE_RDEV_ALL(rdev,tmp) {
		if (list_empty(&rdev->same_set)) {
			/*
			 * The device is not yet used by any array.
			 */

			seq_printf(seq, "%s ",
				   partition_name(rdev->dev));

		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");

static void status_resync(struct seq_file *seq, mddev_t * mddev)
{
	unsigned long max_blocks, resync, res, dt, db, rt;

	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
	max_blocks = mddev->sb->size;

	/*
	 * Should not happen.
	 */

	res = (resync/1024)*1000/(max_blocks/1024 + 1);
	{
		int i, x = res/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	if (!mddev->recovery_running)

		seq_printf(seq, " resync =%3lu.%lu%% (%lu/%lu)",
			   res/10, res % 10, resync, max_blocks);

		seq_printf(seq, " recovery =%3lu.%lu%% (%lu/%lu)",
			   res/10, res % 10, resync, max_blocks);

	/*
	 * We do not want to overflow, so the order of operands and
	 * the * 100 / 100 trick are important. We do a +1 to be
	 * safe against division by zero. We only estimate anyway.
	 *
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = resync - (mddev->resync_mark_cnt/2);
	rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
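	/*
	 * Example with small numbers: 60 s since the mark (dt = 60),
	 * 6000 blocks synced since the mark (db = 6000), 94000 blocks
	 * to go: rt = (60 * (94000 / 61)) / 100 = 924 s, printed below
	 * as "finish=15.4min". Splitting the *100/100 across the two
	 * divisions keeps the intermediate products within an unsigned
	 * long even for large arrays.
	 */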
	seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/dt);

static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;

	list_for_each(tmp,&all_mddevs)

		mddev = list_entry(tmp, mddev_t, all_mddevs);

	return (void*)2;/* tail */
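/*
 * The seq_file cursor used here is either a real mddev_t pointer or
 * one of two magic values: (void*)1 stands for the header row
 * ("Personalities : ..."), (void*)2 for the trailing row that prints
 * the unused-device list; md_seq_show() below dispatches on them.
 */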
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	mddev_t *next_mddev, *mddev = v;

		tmp = all_mddevs.next;

		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = list_entry(tmp,mddev_t,all_mddevs);

		next_mddev = (void*)2;

static void md_seq_stop(struct seq_file *seq, void *v)
{

static int md_seq_show(struct seq_file *seq, void *v)
{

	struct md_list_head *tmp2;

	if (v == (void*)1) {
		seq_printf(seq, "Personalities : ");
		for (j = 0; j < MAX_PERSONALITY; j++)

				seq_printf(seq, "[%s] ", pers[j]->name);

		seq_printf(seq, "\n");
		seq_printf(seq, "read_ahead ");
		if (read_ahead[MD_MAJOR] == INT_MAX)
			seq_printf(seq, "not set\n");

			seq_printf(seq, "%d sectors\n", read_ahead[MD_MAJOR]);

	if (v == (void*)2) {

	seq_printf(seq, "md%d : %sactive", mdidx(mddev),
		   mddev->pers ? "" : "in");

		seq_printf(seq, " (read-only)");
	seq_printf(seq, " %s", mddev->pers->name);

	ITERATE_RDEV(mddev,rdev,tmp2) {
		seq_printf(seq, " %s[%d]",
			   partition_name(rdev->dev), rdev->desc_nr);

			seq_printf(seq, "(F)");

	if (!list_empty(&mddev->disks)) {

			seq_printf(seq, "\n %d blocks",
				   md_size[mdidx(mddev)]);

			seq_printf(seq, "\n %d blocks", size);

		mddev->pers->status (seq, mddev);

	seq_printf(seq, "\n ");
	if (mddev->curr_resync) {
		status_resync (seq, mddev);

		if (sem_getcount(&mddev->resync_sem) != 1)
			seq_printf(seq, " resync=DELAYED");

	seq_printf(seq, "\n");

static struct seq_operations md_seq_ops = {
	.start = md_seq_start,
	.next = md_seq_next,
	.stop = md_seq_stop,
	.show = md_seq_show,
};

static int md_seq_open(struct inode *inode, struct file *file)
{

	error = seq_open(file, &md_seq_ops);

static struct file_operations md_seq_fops = {
	.open = md_seq_open,

	.llseek = seq_lseek,
	.release = seq_release,
};

int register_md_personality(int pnum, mdk_personality_t *p)
{
	if (pnum >= MAX_PERSONALITY) {

	printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);

int unregister_md_personality(int pnum)
{
	if (pnum >= MAX_PERSONALITY) {

	printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);

mdp_disk_t *get_spare(mddev_t *mddev)
{
	mdp_super_t *sb = mddev->sb;

	struct md_list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {

		disk = &sb->disks[rdev->desc_nr];
		if (disk_faulty(disk)) {

		if (disk_active(disk))

static unsigned int sync_io[DK_MAX_MAJOR][DK_MAX_DISK];
void md_sync_acct(kdev_t dev, unsigned long nr_sectors)
{
	unsigned int major = MAJOR(dev);

	index = disk_index(dev);
	if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))

	sync_io[major][index] += nr_sectors;
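/*
 * Rationale, as this file uses it: resync IO submitted by the MD
 * thread is booked into sync_io[] above, and is_mddev_idle() below
 * subtracts it from the per-disk kstat totals. What remains is IO
 * generated by other users of the member disks, so "idle" effectively
 * means "no non-resync IO is hitting any member device".
 */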
static int is_mddev_idle(mddev_t *mddev)
{

	struct md_list_head *tmp;

	unsigned long curr_events;

	ITERATE_RDEV(mddev,rdev,tmp) {
		int major = MAJOR(rdev->dev);
		int idx = disk_index(rdev->dev);

		if ((idx >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))

		curr_events = kstat.dk_drive_rblk[major][idx] +
			      kstat.dk_drive_wblk[major][idx] ;
		curr_events -= sync_io[major][idx];
		if ((curr_events - rdev->last_events) > 32) {
			rdev->last_events = curr_events;

MD_DECLARE_WAIT_QUEUE_HEAD(resync_wait);

void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512-byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);

	// stop recovery, signal do_sync ....
	if (mddev->pers->stop_resync)
		mddev->pers->stop_resync(mddev);
	if (mddev->recovery_running)
		md_interrupt_thread(md_recovery_thread);

#define SYNC_MARKS	10
#define SYNC_MARK_STEP	(3*HZ)
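/*
 * The resync speed shown in /proc/mdstat is computed against a ring of
 * SYNC_MARKS timestamps taken every SYNC_MARK_STEP jiffies: with 10
 * marks, 3 seconds apart, the reported rate is a moving average over
 * roughly the last 30 seconds rather than over the whole resync.
 */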
3228 int md_do_sync(mddev_t *mddev, mdp_disk_t *spare)
3231 unsigned int max_sectors, currspeed,
3232 j, window, err, serialize;
3233 unsigned long mark[SYNC_MARKS];
3234 unsigned long mark_cnt[SYNC_MARKS];
3236 struct md_list_head *tmp;
3237 unsigned long last_check;
3240 err = down_interruptible(&mddev->resync_sem);
3246 ITERATE_MDDEV(mddev2,tmp) {
3247 if (mddev2 == mddev)
3249 if (mddev2->curr_resync && match_mddev_units(mddev,mddev2)) {
3250 printk(KERN_INFO "md: delaying resync of md%d until md%d "
3251 "has finished resync (they share one or more physical units)\n",
3252 mdidx(mddev), mdidx(mddev2));
3258 interruptible_sleep_on(&resync_wait);
3259 if (md_signal_pending(current)) {
3267 mddev->curr_resync = 1;
3269 max_sectors = mddev->sb->size<<1;
3271 printk(KERN_INFO "md: syncing RAID array md%d\n", mdidx(mddev));
3272 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed: %d KB/sec/disc.\n",
3273 sysctl_speed_limit_min);
3274 printk(KERN_INFO "md: using maximum available idle IO bandwith "
3275 "(but not more than %d KB/sec) for reconstruction.\n",
3276 sysctl_speed_limit_max);
3279 * Resync has low priority.
3283 is_mddev_idle(mddev); /* this also initializes IO event counters */
3284 for (m = 0; m < SYNC_MARKS; m++) {
3289 mddev->resync_mark = mark[last_mark];
3290 mddev->resync_mark_cnt = mark_cnt[last_mark];
3293 * Tune reconstruction:
3295 window = vm_max_readahead*(PAGE_SIZE/512);
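	/*
	 * window is in 512-byte sectors; e.g. with 4K pages and a
	 * vm_max_readahead of 31 pages this is 248 sectors, which the
	 * printk below reports as a "124k window".
	 */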
3296 printk(KERN_INFO "md: using %dk window, over a total of %d blocks.\n",
3297 window/2,max_sectors/2);
3299 atomic_set(&mddev->recovery_active, 0);
3300 init_waitqueue_head(&mddev->recovery_wait);
3302 for (j = 0; j < max_sectors;) {
3305 sectors = mddev->pers->sync_request(mddev, j);
3311 atomic_add(sectors, &mddev->recovery_active);
3313 mddev->curr_resync = j;
3315 if (last_check + window > j)
3320 run_task_queue(&tq_disk);
3323 if (jiffies >= mark[last_mark] + SYNC_MARK_STEP ) {
3325 int next = (last_mark+1) % SYNC_MARKS;
3327 mddev->resync_mark = mark[next];
3328 mddev->resync_mark_cnt = mark_cnt[next];
3329 mark[next] = jiffies;
3330 mark_cnt[next] = j - atomic_read(&mddev->recovery_active);
3335 if (md_signal_pending(current)) {
3337 * got a signal, exit.
3339 mddev->curr_resync = 0;
3340 printk(KERN_INFO "md: md_do_sync() got signal ... exiting\n");
3347 	 * this loop exits only when either we are slower than the
3348 	 * 'hard' speed limit, or the system has been IO-idle for a tick;
3350 	 * the system might be non-idle CPU-wise, but we only care
3351 	 * about not overloading the IO subsystem (things like an
3352 	 * e2fsck being done on the RAID array should execute fast).
3354 if (md_need_resched(current))
3357 currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
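		/*
		 * currspeed is in KB/sec: sectors completed since the
		 * oldest mark, halved to convert 512-byte sectors to KB,
		 * divided by the seconds elapsed since that mark (the
		 * two +1 terms guard against a division by zero and a
		 * zero result, respectively).
		 */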
3359 if (currspeed > sysctl_speed_limit_min) {
3362 if ((currspeed > sysctl_speed_limit_max) ||
3363 !is_mddev_idle(mddev)) {
3364 current->state = TASK_INTERRUPTIBLE;
3365 md_schedule_timeout(HZ/4);
3369 current->nice = -20;
3371 printk(KERN_INFO "md: md%d: sync done.\n",mdidx(mddev));
3374 * this also signals 'finished resyncing' to md_stop
3377 wait_disk_event(mddev->recovery_wait, atomic_read(&mddev->recovery_active)==0);
3378 up(&mddev->resync_sem);
3380 mddev->curr_resync = 0;
3381 wake_up(&resync_wait);
3387 * This is a kernel thread which syncs a spare disk with the active array
3389 * the amount of foolproofing might seem to be a tad excessive, but an
3390 * early (not so error-safe) version of raid1syncd synced the first 0.5 gigs
3391 * of my root partition with the first 0.5 gigs of my /home partition ... so
3392  * I'm a bit nervous ;)
3394 void md_do_recovery(void *data)
3400 struct md_list_head *tmp;
3402 printk(KERN_INFO "md: recovery thread got woken up ...\n");
3404 ITERATE_MDDEV(mddev,tmp) {
3408 if (mddev->recovery_running)
3410 if (sb->active_disks == sb->raid_disks)
3412 if (mddev->sb_dirty)
3413 md_update_sb(mddev);
3414 if (!sb->spare_disks) {
3415 printk(KERN_ERR "md%d: no spare disk to reconstruct array! "
3416 "-- continuing in degraded mode\n", mdidx(mddev));
3420 * now here we get the spare and resync it.
3422 spare = get_spare(mddev);
3425 printk(KERN_INFO "md%d: resyncing spare disk %s to replace failed disk\n",
3426 mdidx(mddev), partition_name(MKDEV(spare->major,spare->minor)));
3427 if (!mddev->pers->diskop)
3429 if (mddev->pers->diskop(mddev, &spare, DISKOP_SPARE_WRITE))
3431 down(&mddev->recovery_sem);
3432 mddev->recovery_running = 1;
3433 err = md_do_sync(mddev, spare);
3435 printk(KERN_INFO "md%d: spare disk %s failed, skipping to next spare.\n",
3436 mdidx(mddev), partition_name(MKDEV(spare->major,spare->minor)));
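		/*
		 * The sync failed with an IO error: if the spare has not
		 * already been flagged faulty, take it out of service and
		 * mark it faulty so the next wakeup tries another spare.
		 */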
3437 if (!disk_faulty(spare)) {
3438 mddev->pers->diskop(mddev,&spare,DISKOP_SPARE_INACTIVE);
3439 mark_disk_faulty(spare);
3440 mark_disk_nonsync(spare);
3441 mark_disk_inactive(spare);
3443 sb->working_disks--;
3447 if (disk_faulty(spare))
3448 mddev->pers->diskop(mddev, &spare,
3449 DISKOP_SPARE_INACTIVE);
3450 if (err == -EINTR || err == -ENOMEM) {
3452 * Recovery got interrupted, or ran out of mem ...
3453 * signal back that we have finished using the array.
3455 mddev->pers->diskop(mddev, &spare,
3456 DISKOP_SPARE_INACTIVE);
3457 up(&mddev->recovery_sem);
3458 mddev->recovery_running = 0;
3461 mddev->recovery_running = 0;
3462 up(&mddev->recovery_sem);
3464 if (!disk_faulty(spare)) {
3466 * the SPARE_ACTIVE diskop possibly changes the
3469 mddev->pers->diskop(mddev, &spare, DISKOP_SPARE_ACTIVE);
3470 mark_disk_sync(spare);
3471 mark_disk_active(spare);
3475 mddev->sb_dirty = 1;
3476 md_update_sb(mddev);
3479 printk(KERN_INFO "md: recovery thread finished ...\n");
3483 int md_notify_reboot(struct notifier_block *this,
3484 unsigned long code, void *x)
3486 struct md_list_head *tmp;
3489 if ((code == MD_SYS_DOWN) || (code == MD_SYS_HALT)
3490 || (code == MD_SYS_POWER_OFF)) {
3492 printk(KERN_INFO "md: stopping all md devices.\n");
3494 ITERATE_MDDEV(mddev,tmp)
3495 do_md_stop (mddev, 1);
3497 	 * certain more exotic SCSI devices are known to be
3498 	 * volatile with respect to overly early system reboots. While
3499 	 * the right place to handle this issue is the individual
3500 	 * driver, we do want to have a safe RAID driver ...
3507 struct notifier_block md_notifier = {
3508 notifier_call: md_notify_reboot,
3510 priority: INT_MAX, /* before any real devices */
3513 static void md_geninit(void)
3515 struct proc_dir_entry *p;
3518 for(i = 0; i < MAX_MD_DEVS; i++) {
3519 md_blocksizes[i] = 1024;
3521 md_hardsect_sizes[i] = 512;
3523 blksize_size[MAJOR_NR] = md_blocksizes;
3524 blk_size[MAJOR_NR] = md_size;
3525 max_readahead[MAJOR_NR] = md_maxreadahead;
3526 hardsect_size[MAJOR_NR] = md_hardsect_sizes;
3528 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
3530 #ifdef CONFIG_PROC_FS
3531 p = create_proc_entry("mdstat", S_IRUGO, NULL);
3533 p->proc_fops = &md_seq_fops;
3537 request_queue_t * md_queue_proc(kdev_t dev)
3539 mddev_t *mddev = kdev_to_mddev(dev);
3541 return BLK_DEFAULT_QUEUE(MAJOR_NR);
3543 return &mddev->queue;
3546 int md__init md_init(void)
3548 static char * name = "mdrecoveryd";
3551 printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d, MD_SB_DISKS=%d\n",
3552 MD_MAJOR_VERSION, MD_MINOR_VERSION,
3553 MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
3555 if (devfs_register_blkdev (MAJOR_NR, "md", &md_fops))
3557 printk(KERN_ALERT "md: Unable to get major %d for md\n", MAJOR_NR);
3560 devfs_handle = devfs_mk_dir (NULL, "md", NULL);
3561 /* we don't use devfs_register_series because we want to fill md_hd_struct */
3562 for (minor=0; minor < MAX_MD_DEVS; ++minor) {
3564 sprintf (devname, "%u", minor);
3565 md_hd_struct[minor].de = devfs_register (devfs_handle,
3566 devname, DEVFS_FL_DEFAULT, MAJOR_NR, minor,
3567 S_IFBLK | S_IRUSR | S_IWUSR, &md_fops, NULL);
3570 /* all requests on an uninitialised device get failed... */
3571 blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), md_fail_request);
3572 blk_dev[MAJOR_NR].queue = md_queue_proc;
3575 read_ahead[MAJOR_NR] = INT_MAX;
3577 add_gendisk(&md_gendisk);
3579 md_recovery_thread = md_register_thread(md_do_recovery, NULL, name);
3580 if (!md_recovery_thread)
3581 printk(KERN_ALERT "md: bug: couldn't allocate md_recovery_thread\n");
3583 md_register_reboot_notifier(&md_notifier);
3584 raid_table_header = register_sysctl_table(raid_root_table, 1);
3594  * When md (and any required personalities) are compiled into the kernel
3595  * (not a module), arrays can be assembled at boot time with AUTODETECT,
3596  * where specially marked partitions are registered with md_autodetect_dev(),
3597  * and with MD_BOOT, where devices to be collected are given on the boot line
3599 * The code for that is here.
3605 } raid_setup_args md__initdata;
3608 * Searches all registered partitions for autorun RAID arrays
3611 static kdev_t detected_devices[128];
3614 void md_autodetect_dev(kdev_t dev)
3616 	if (dev_cnt >= 0 && dev_cnt < 128)	/* detected_devices[] has 128 slots */
3617 detected_devices[dev_cnt++] = dev;
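/*
 * Try to assemble everything that md_autodetect_dev() recorded: import
 * each detected device, queue it on pending_raid_disks, and let
 * autorun_devices() group the members into arrays by superblock.
 */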
3621 static void autostart_arrays(void)
3626 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
3628 for (i = 0; i < dev_cnt; i++) {
3629 kdev_t dev = detected_devices[i];
3631 if (md_import_device(dev,1)) {
3632 printk(KERN_ALERT "md: could not import %s!\n",
3633 partition_name(dev));
3639 rdev = find_rdev_all(dev);
3648 md_list_add(&rdev->pending, &pending_raid_disks);
3652 autorun_devices(-1);
3656 char device_set [MAX_MD_DEVS];
3657 int pers[MAX_MD_DEVS];
3658 int chunk[MAX_MD_DEVS];
3659 char *device_names[MAX_MD_DEVS];
3660 } md_setup_args md__initdata;
3663  * Parse the command-line parameters given to our kernel, but do not
3664 * actually try to invoke the MD device now; that is handled by
3665 * md_setup_drive after the low-level disk drivers have initialised.
3667 * 27/11/1999: Fixed to work correctly with the 2.3 kernel (which
3668 * assigns the task of parsing integer arguments to the
3669 * invoked program now). Added ability to initialise all
3670 * the MD devices (by specifying multiple "md=" lines)
3671 * instead of just one. -- KTK
3672  * 18May2000: Added support for persistent-superblock arrays:
3673 * md=n,0,factor,fault,device-list uses RAID0 for device n
3674 * md=n,-1,factor,fault,device-list uses LINEAR for device n
3675 * md=n,device-list reads a RAID superblock from the devices
3676  * elements in device-list are read by name_to_kdev_t, so they can be
3677  * a hex number or a name like /dev/hda1 or /dev/sdb
3678 * 2001-06-03: Dave Cinege <dcinege@psychosis.com>
3679  * Shifted name_to_kdev_t() and related operations to md_setup_drive()
3680 * for later execution. Rewrote section to make devfs compatible.
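 *
 * Illustrative examples of the syntax above (the device names are
 * made up):
 *	md=0,0,4,0,/dev/hda1,/dev/hdc1	RAID0 md0, 64k chunks (4K << 4)
 *	md=1,-1,0,0,/dev/sdb2,/dev/sdc2	LINEAR md1
 *	md=2,/dev/sda3,/dev/sdb3	md2 assembled from its superblocks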
3682 static int md__init md_setup(char *str)
3684 int minor, level, factor, fault;
3688 if (get_option(&str, &minor) != 2) { /* MD Number */
3689 printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
3692 if (minor >= MAX_MD_DEVS) {
3693 printk(KERN_WARNING "md: md=%d, Minor device number too high.\n", minor);
3695 } else if (md_setup_args.device_names[minor]) {
3696 		printk(KERN_WARNING "md: md=%d, Specified more than once. "
3697 "Replacing previous definition.\n", minor);
3699 switch (get_option(&str, &level)) { /* RAID Personality */
3700 case 2: /* could be 0 or -1.. */
3701 if (level == 0 || level == -1) {
3702 if (get_option(&str, &factor) != 2 || /* Chunk Size */
3703 get_option(&str, &fault) != 2) {
3704 printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
3707 md_setup_args.pers[minor] = level;
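			/* chunk size in bytes: 4K shifted left by "factor" */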
3708 md_setup_args.chunk[minor] = 1 << (factor+12);
3720 "md: The kernel has not been configured for raid%d support!\n",
3724 md_setup_args.pers[minor] = level;
3728 case 1: /* the first device is numeric */
3732 md_setup_args.pers[minor] = 0;
3733 pername="super-block";
3736 printk(KERN_INFO "md: Will configure md%d (%s) from %s, below.\n",
3737 minor, pername, str);
3738 md_setup_args.device_names[minor] = str;
3743 extern kdev_t name_to_kdev_t(char *line) md__init;
3744 void md__init md_setup_drive(void)
3749 kdev_t devices[MD_SB_DISKS+1];
3751 for (minor = 0; minor < MAX_MD_DEVS; minor++) {
3754 mdu_disk_info_t dinfo;
3756 if ((devname = md_setup_args.device_names[minor]) == 0) continue;
3758 for (i = 0; i < MD_SB_DISKS && devname != 0; i++) {
3763 p = strchr(devname, ',');
3767 dev = name_to_kdev_t(devname);
3768 handle = devfs_find_handle(NULL, devname, MAJOR (dev), MINOR (dev),
3769 DEVFS_SPECIAL_BLK, 1);
3771 unsigned major, minor;
3772 devfs_get_maj_min(handle, &major, &minor);
3773 dev = MKDEV(major, minor);
3776 printk(KERN_WARNING "md: Unknown device name: %s\n", devname);
3781 md_setup_args.device_set[minor] = 1;
3787 if (md_setup_args.device_set[minor] == 0)
3790 if (mddev_map[minor]) {
3792 "md: Ignoring md=%d, already autodetected. (Use raid=noautodetect)\n",
3796 printk(KERN_INFO "md: Loading md%d: %s\n", minor, md_setup_args.device_names[minor]);
3798 mddev = alloc_mddev(MKDEV(MD_MAJOR,minor));
3800 printk(KERN_ERR "md: kmalloc failed - cannot start array %d\n", minor);
3803 if (md_setup_args.pers[minor]) {
3804 /* non-persistent */
3805 mdu_array_info_t ainfo;
3806 ainfo.level = pers_to_level(md_setup_args.pers[minor]);
3809 		ainfo.raid_disks = 0;
3810 		ainfo.md_minor = minor;
3811 ainfo.not_persistent = 1;
3813 ainfo.state = (1 << MD_SB_CLEAN);
3814 ainfo.active_disks = 0;
3815 ainfo.working_disks = 0;
3816 ainfo.failed_disks = 0;
3817 ainfo.spare_disks = 0;
3819 ainfo.chunk_size = md_setup_args.chunk[minor];
3820 err = set_array_info(mddev, &ainfo);
3821 for (i = 0; !err && (dev = devices[i]); i++) {
3823 dinfo.raid_disk = i;
3824 dinfo.state = (1<<MD_DISK_ACTIVE)|(1<<MD_DISK_SYNC);
3825 dinfo.major = MAJOR(dev);
3826 dinfo.minor = MINOR(dev);
3827 mddev->sb->nr_disks++;
3828 mddev->sb->raid_disks++;
3829 mddev->sb->active_disks++;
3830 mddev->sb->working_disks++;
3831 err = add_new_disk (mddev, &dinfo);
3835 for (i = 0; (dev = devices[i]); i++) {
3836 dinfo.major = MAJOR(dev);
3837 dinfo.minor = MINOR(dev);
3838 add_new_disk (mddev, &dinfo);
3842 err = do_md_run(mddev);
3844 mddev->sb_dirty = 0;
3845 do_md_stop(mddev, 0);
3846 printk(KERN_WARNING "md: starting md%d failed\n", minor);
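/*
 * Parse the raid= boot option: a comma-separated list of words, of
 * which only "noautodetect" is currently recognised.
 */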
3851 static int md__init raid_setup(char *str)
3855 len = strlen(str) + 1;
3859 char *comma = strchr(str+pos, ',');
3862 wlen = (comma-str)-pos;
3863 else wlen = (len-1)-pos;
3865 if (strncmp(str, "noautodetect", wlen) == 0)
3866 raid_setup_args.noautodetect = 1;
3869 raid_setup_args.set = 1;
3873 int md__init md_run_setup(void)
3875 if (raid_setup_args.noautodetect)
3876 printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=noautodetect)\n");
3883 __setup("raid=", raid_setup);
3884 __setup("md=", md_setup);
3886 __initcall(md_init);
3887 __initcall(md_run_setup);
3889 #else /* It is a MODULE */
3891 int init_module(void)
3896 static void free_device_names(void)
3898 while (!list_empty(&device_names)) {
3899 struct dname *tmp = list_entry(device_names.next,
3901 list_del(&tmp->list);
3907 void cleanup_module(void)
3909 md_unregister_thread(md_recovery_thread);
3910 devfs_unregister(devfs_handle);
3912 devfs_unregister_blkdev(MAJOR_NR,"md");
3913 unregister_reboot_notifier(&md_notifier);
3914 unregister_sysctl_table(raid_table_header);
3915 #ifdef CONFIG_PROC_FS
3916 remove_proc_entry("mdstat", NULL);
3919 del_gendisk(&md_gendisk);
3921 blk_dev[MAJOR_NR].queue = NULL;
3922 blksize_size[MAJOR_NR] = NULL;
3923 blk_size[MAJOR_NR] = NULL;
3924 max_readahead[MAJOR_NR] = NULL;
3925 hardsect_size[MAJOR_NR] = NULL;
3927 free_device_names();
3932 MD_EXPORT_SYMBOL(md_size);
3933 MD_EXPORT_SYMBOL(register_md_personality);
3934 MD_EXPORT_SYMBOL(unregister_md_personality);
3935 MD_EXPORT_SYMBOL(partition_name);
3936 MD_EXPORT_SYMBOL(md_error);
3937 MD_EXPORT_SYMBOL(md_do_sync);
3938 MD_EXPORT_SYMBOL(md_sync_acct);
3939 MD_EXPORT_SYMBOL(md_done_sync);
3940 MD_EXPORT_SYMBOL(md_recover_arrays);
3941 MD_EXPORT_SYMBOL(md_register_thread);
3942 MD_EXPORT_SYMBOL(md_unregister_thread);
3943 MD_EXPORT_SYMBOL(md_update_sb);
3944 MD_EXPORT_SYMBOL(md_wakeup_thread);
3945 MD_EXPORT_SYMBOL(md_print_devices);
3946 MD_EXPORT_SYMBOL(find_rdev_nr);
3947 MD_EXPORT_SYMBOL(md_interrupt_thread);
3948 EXPORT_SYMBOL(mddev_map);
3949 MODULE_LICENSE("GPL");