2 * mdadm - manage Linux "md" devices aka RAID arrays.
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * Email: <neilb@suse.de>
28 /* To restripe, we read from old geometry to a buffer, and
29 * read from buffer to new geometry.
30 * When reading, we might have missing devices and so could need
32 * When writing, we need to create correct parity and Q.
/*
 * geo_map(): for the given stripe, return the index of the disk in the
 * array that holds logical 'block'.
 *   block == -1 requests the parity (P) disk.
 *   block == -2 requests the Q-syndrome disk (RAID6 layouts only).
 * Dispatches on level*100 + layout so each RAID4/5/6 layout variant gets
 * its own case.
 * NOTE(review): this listing is elided (original line numbers are not
 * contiguous), so several case bodies are only partially visible;
 * comments below describe only the visible code.
 */
36 int geo_map(int block, unsigned long long stripe, int raid_disks,
37 int level, int layout)
39 /* On the given stripe, find which disk in the array will have
40 * block numbered 'block'.
41 * '-1' means the parity block.
42 * '-2' means the Q syndrome.
46 /* layout is not relevant for raid0 and raid4 */
51 switch(level*100 + layout) {
54 case 500 + ALGORITHM_PARITY_N:
55 /* raid 4 isn't messed around by parity blocks */
57 return raid_disks-1; /* parity block */
59 case 500 + ALGORITHM_LEFT_ASYMMETRIC:
/* pd = parity disk, rotating right-to-left with the stripe number */
60 pd = (raid_disks-1) - stripe % raid_disks;
61 if (block == -1) return pd;
66 case 500 + ALGORITHM_RIGHT_ASYMMETRIC:
67 pd = stripe % raid_disks;
68 if (block == -1) return pd;
73 case 500 + ALGORITHM_LEFT_SYMMETRIC:
74 pd = (raid_disks - 1) - stripe % raid_disks;
75 if (block == -1) return pd;
/* symmetric layouts: data blocks follow P in rotated device order */
76 return (pd + 1 + block) % raid_disks;
78 case 500 + ALGORITHM_RIGHT_SYMMETRIC:
79 pd = stripe % raid_disks;
80 if (block == -1) return pd;
81 return (pd + 1 + block) % raid_disks;
83 case 500 + ALGORITHM_PARITY_0:
86 case 600 + ALGORITHM_PARITY_N_6:
88 return raid_disks - 1;
90 return raid_disks - 2; /* parity block */
92 case 600 + ALGORITHM_LEFT_ASYMMETRIC_6:
94 return raid_disks - 1;
96 pd = (raid_disks-1) - stripe % raid_disks;
97 if (block == -1) return pd;
102 case 600 + ALGORITHM_RIGHT_ASYMMETRIC_6:
104 return raid_disks - 1;
106 pd = stripe % raid_disks;
107 if (block == -1) return pd;
112 case 600 + ALGORITHM_LEFT_SYMMETRIC_6:
114 return raid_disks - 1;
116 pd = (raid_disks - 1) - stripe % raid_disks;
117 if (block == -1) return pd;
118 return (pd + 1 + block) % raid_disks;
120 case 600 + ALGORITHM_RIGHT_SYMMETRIC_6:
122 return raid_disks - 1;
124 pd = stripe % raid_disks;
125 if (block == -1) return pd;
126 return (pd + 1 + block) % raid_disks;
128 case 600 + ALGORITHM_PARITY_0_6:
130 return raid_disks - 1;
133 case 600 + ALGORITHM_PARITY_0:
/* True RAID6 layouts: P and Q are adjacent and both rotate */
140 case 600 + ALGORITHM_LEFT_ASYMMETRIC:
141 pd = raid_disks - 1 - (stripe % raid_disks);
142 if (block == -1) return pd;
143 if (block == -2) return (pd+1) % raid_disks;
144 if (pd == raid_disks - 1)
150 case 600 + ALGORITHM_ROTATING_ZERO_RESTART:
151 /* Different order for calculating Q, otherwise same as ... */
152 case 600 + ALGORITHM_RIGHT_ASYMMETRIC:
153 pd = stripe % raid_disks;
154 if (block == -1) return pd;
155 if (block == -2) return (pd+1) % raid_disks;
156 if (pd == raid_disks - 1)
162 case 600 + ALGORITHM_LEFT_SYMMETRIC:
163 pd = raid_disks - 1 - (stripe % raid_disks);
164 if (block == -1) return pd;
165 if (block == -2) return (pd+1) % raid_disks;
166 return (pd + 2 + block) % raid_disks;
168 case 600 + ALGORITHM_RIGHT_SYMMETRIC:
169 pd = stripe % raid_disks;
170 if (block == -1) return pd;
171 if (block == -2) return (pd+1) % raid_disks;
172 return (pd + 2 + block) % raid_disks;
174 case 600 + ALGORITHM_ROTATING_N_RESTART:
175 /* Same as left_asymmetric, but first stripe is
176 * D D D P Q rather than
179 pd = raid_disks - 1 - ((stripe + 1) % raid_disks);
180 if (block == -1) return pd;
181 if (block == -2) return (pd+1) % raid_disks;
182 if (pd == raid_disks - 1)
188 case 600 + ALGORITHM_ROTATING_N_CONTINUE:
189 /* Same as left_symmetric but Q is before P */
190 pd = raid_disks - 1 - (stripe % raid_disks);
191 if (block == -1) return pd;
192 if (block == -2) return (pd+raid_disks-1) % raid_disks;
193 return (pd + 1 + block) % raid_disks;
/*
 * is_ddf(): true when 'layout' is one of the DDF-style RAID6 layouts.
 * Callers use this to decide whether Q is computed over all raid_disks
 * in device order (DDF) rather than over data_disks (native md).
 * NOTE(review): body is partially elided; only the matched cases are visible.
 */
198 int is_ddf(int layout)
204 case ALGORITHM_ROTATING_N_CONTINUE:
205 case ALGORITHM_ROTATING_N_RESTART:
206 case ALGORITHM_ROTATING_ZERO_RESTART:
/*
 * xor_blocks(): XOR 'disks' source buffers of 'size' bytes into 'target'
 * (used to compute or repair the P parity block).
 * Byte-at-a-time, as the original comment admits.
 */
211 void xor_blocks(char *target, char **sources, int disks, int size)
214 /* Amazingly inefficient... */
215 for (i=0; i<size; i++) {
217 for (j=0 ; j<disks; j++)
/*
 * qsyndrome(): compute the RAID6 P (XOR) and Q (GF(2^8) Reed-Solomon)
 * syndromes over 'disks' source blocks of 'size' bytes, storing them in
 * 'p' and 'q'.  The inner loop multiplies the running Q by 2 in GF(2^8)
 * (shift left, conditionally XOR the 0x1d reduction via the w20 mask).
 * NOTE(review): body partially elided here.
 */
223 void qsyndrome(uint8_t *p, uint8_t *q, uint8_t **sources, int disks, int size)
226 uint8_t wq0, wp0, wd0, w10, w20;
227 for ( d = 0; d < size; d++) {
228 wq0 = wp0 = sources[disks-1][d];
229 for ( z = disks-2 ; z >= 0 ; z-- ) {
/* w20: 0xff when the top bit is set, selecting the 0x1d reduction */
232 w20 = (wq0&0x80) ? 0xff : 0x00;
233 w10 = (wq0 << 1) & 0xff;
244 * The following was taken from linux/drivers/md/mktables.c, and modified
245 * to create in-memory tables rather than C code
/* gfmul(): multiply a*b in GF(2^8) with the RAID6 polynomial 0x1d
 * (shift-and-conditionally-reduce loop; remainder elided in this view). */
247 static uint8_t gfmul(uint8_t a, uint8_t b)
254 a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
/* gfpow(): raise 'a' to the power 'b' in GF(2^8); used below to build the
 * inverse table via x^-1 == x^254.  Body elided in this view. */
261 static uint8_t gfpow(uint8_t a, int b)
/*
 * Precomputed GF(2^8) lookup tables, filled in by make_tables():
 *   raid6_gfmul  - full 256x256 multiplication table
 *   raid6_gfexp  - powers of 2 (exponent table)
 *   raid6_gfinv  - multiplicative inverses (x^254)
 *   raid6_gfexi  - inv(2^x + 1), used by raid6_2data_recov()
 *   raid6_gflog / raid6_gfilog - discrete log and antilog tables
 * tables_ready is the "already initialized" flag.
 */
279 int tables_ready = 0;
280 uint8_t raid6_gfmul[256][256];
281 uint8_t raid6_gfexp[256];
282 uint8_t raid6_gfinv[256];
283 uint8_t raid6_gfexi[256];
284 uint8_t raid6_gflog[256];
285 uint8_t raid6_gfilog[256];
/*
 * make_tables(): populate all of the GF(2^8) lookup tables above.
 * Called once before any RAID6 recovery; presumably sets tables_ready
 * on the elided tail — TODO confirm against the full source.
 */
286 void make_tables(void)
292 /* Compute multiplication table */
293 for (i = 0; i < 256; i++)
294 for (j = 0; j < 256; j++)
295 raid6_gfmul[i][j] = gfmul(i, j);
297 /* Compute power-of-2 table (exponent) */
299 for (i = 0; i < 256; i++) {
303 v = 0; /* For entry 255, not a real entry */
306 /* Compute inverse table x^-1 == x^254 */
307 for (i = 0; i < 256; i++)
308 raid6_gfinv[i] = gfpow(i, 254);
310 /* Compute inv(2^x + 1) (exponent-xor-inverse) table */
311 for (i = 0; i < 256; i ++)
312 raid6_gfexi[i] = raid6_gfinv[raid6_gfexp[i] ^ 1];
314 /* Compute log and inverse log */
315 /* Modified code from:
316 * http://web.eecs.utk.edu/~plank/plank/papers/CS-96-332.html
320 raid6_gfilog[255] = 0;
322 for (log = 0; log < 255; log++) {
323 raid6_gflog[b] = (uint8_t) log;
324 raid6_gfilog[log] = (uint8_t) b;
/* 0435 (octal) is the primitive polynomial 0x11d used for reduction */
326 if (b & 256) b = b ^ 0435;
/*
 * ensure_zero_has_size(): guarantee the shared all-zero buffer 'zero' is
 * at least chunk_size bytes (reallocating when too small).
 * NOTE(review): the elided line(s) before xcalloc presumably free the old
 * buffer — verify against the full source.
 */
335 void ensure_zero_has_size(int chunk_size)
337 if (zero == NULL || chunk_size > zero_size) {
340 zero = xcalloc(1, chunk_size);
341 zero_size = chunk_size;
345 /* Following was taken from linux/drivers/md/raid6recov.c */
347 /* Recover two failed data blocks. */
/*
 * raid6_2data_recov(): reconstruct data blocks 'faila' and 'failb' from
 * the surviving blocks plus P and Q, using the precomputed GF tables
 * (make_tables() must have run).  ptrs[] holds the syndrome-ordered
 * block pointers; the failed slots are reused as scratch for delta-P/Q.
 * NOTE(review): setup/teardown of the pointer table is elided here.
 */
349 void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
350 uint8_t **ptrs, int neg_offset)
352 uint8_t *p, *q, *dp, *dq;
354 const uint8_t *pbmul; /* P multiplier table for B data */
355 const uint8_t *qmul; /* Q multiplier table (for both) */
371 /* Compute syndrome with zero for the missing data pages
372 Use the dead data pages as temporary storage for
373 delta p and delta q */
379 qsyndrome(dp, dq, ptrs, disks-2, bytes);
381 /* Restore pointer table */
385 /* Now, pick the proper data tables */
386 pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
387 qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];
393 *dq++ = db = pbmul[px] ^ qx; /* Reconstructed B */
394 *dp++ = db ^ px; /* Reconstructed A */
399 /* Recover failure of one data block plus the P block */
/*
 * raid6_datap_recov(): rebuild data block 'faila' from Q and the
 * surviving data, then regenerate P by XOR-ing the recovered data in.
 * NOTE(review): pointer-table setup/restore lines are elided here.
 */
400 void raid6_datap_recov(int disks, size_t bytes, int faila, uint8_t **ptrs,
404 const uint8_t *qmul; /* Q multiplier table */
414 /* Compute syndrome with zero for the missing data page
415 Use the dead data page as temporary storage for delta q */
419 qsyndrome(p, dq, ptrs, disks-2, bytes);
421 /* Restore pointer table */
424 /* Now, pick the proper data tables */
425 qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
/* dq becomes the recovered data; P is fixed up in the same pass */
429 *p++ ^= *dq = qmul[*q ^ *dq];
434 /* Try to find out if a specific disk has a problem */
/*
 * raid6_check_disks(): compare stored P/Q against freshly computed p/q
 * byte by byte and try to identify a single inconsistent disk.
 * Returns the suspected disk index, -1 for "all consistent", or -2 when
 * the evidence points at more than one disk (set via broken_status).
 * When both P and Q differ, the discrete-log difference of the two
 * deltas identifies which data disk is at fault (classic RAID6 check).
 * NOTE(review): several lines (Px/Qx declarations, some switch arms)
 * are elided in this view.
 */
435 int raid6_check_disks(int data_disks, int start, int chunk_size,
436 int level, int layout, int diskP, int diskQ,
437 uint8_t *p, uint8_t *q, char **stripes)
442 int curr_broken_disk = -1;
443 int prev_broken_disk = -1;
444 int broken_status = 0;
446 for(i = 0; i < chunk_size; i++) {
447 Px = (uint8_t)stripes[diskP][i] ^ (uint8_t)p[i];
448 Qx = (uint8_t)stripes[diskQ][i] ^ (uint8_t)q[i];
450 if((Px != 0) && (Qx == 0))
451 curr_broken_disk = diskP;
453 if((Px == 0) && (Qx != 0))
454 curr_broken_disk = diskQ;
456 if((Px != 0) && (Qx != 0)) {
457 data_id = (raid6_gflog[Qx] - raid6_gflog[Px]);
458 if(data_id < 0) data_id += 255;
459 diskD = geo_map(data_id, start/chunk_size,
460 data_disks + 2, level, layout);
461 curr_broken_disk = diskD;
464 if((Px == 0) && (Qx == 0))
/* NOTE(review): self-assignment is a no-op ("no new evidence at this
 * byte"); consider deleting the statement outright. */
465 curr_broken_disk = curr_broken_disk;
467 if(curr_broken_disk >= data_disks + 2)
470 switch(broken_status) {
472 if(curr_broken_disk != -1) {
473 prev_broken_disk = curr_broken_disk;
479 if(curr_broken_disk != prev_broken_disk)
485 curr_broken_disk = prev_broken_disk = -2;
490 return curr_broken_disk;
493 /*******************************************************************************
494 * Function: save_stripes
496 * Function reads data (only data without P and Q) from array and writes
497 * it to buf and optionally to backup files
499 * source : A list of 'fds' of the active disks.
501 * offsets : A list of offsets on disk belonging
502 * to the array [bytes]
503 * raid_disks : geometry: number of disks in the array
504 * chunk_size : geometry: chunk size [bytes]
505 * level : geometry: RAID level
506 * layout : geometry: layout
507 * nwrites : number of backup files
508 * dest : A list of 'fds' for mirrored targets
509 * (e.g. backup files). They are already seeked to right
510 * (write) location. If NULL, data will be written
512 * start : start address of data to read (must be stripe-aligned)
514 * length - : length of data to read (must be stripe-aligned)
516 * buf : buffer for data. It is large enough to hold
517 * one stripe. It is stripe aligned
521 ******************************************************************************/
/* NOTE(review): this listing is elided; the stripe loop boundaries and
 * several statements are not visible, so comments describe only what is. */
522 int save_stripes(int *source, unsigned long long *offsets,
523 int raid_disks, int chunk_size, int level, int layout,
524 int nwrites, int *dest,
525 unsigned long long start, unsigned long long length,
/* parity disks per level: raid0 none, raid4/5 one, raid6 two */
529 int data_disks = raid_disks - (level == 0 ? 0 : level <=5 ? 1 : 2);
532 unsigned long long length_test;
536 ensure_zero_has_size(chunk_size);
538 len = data_disks * chunk_size;
/* verify 'length' is a whole number of stripes (round-trip check) */
539 length_test = length / len;
542 if (length != length_test) {
543 dprintf("Error: save_stripes(): Data are not alligned. EXIT\n");
544 dprintf("\tArea for saving stripes (length) = %llu\n", length);
545 dprintf("\tWork step (len) = %i\n", len);
546 dprintf("\tExpected save area (length_test) = %llu\n",
/* per-stripe: track up to two failed reads (disk number + logical block) */
553 int fdisk[3], fblock[3];
554 for (disk = 0; disk < raid_disks ; disk++) {
555 unsigned long long offset;
558 offset = (start/chunk_size/data_disks)*chunk_size;
559 dnum = geo_map(disk < data_disks ? disk : data_disks - disk - 1,
560 start/chunk_size/data_disks,
561 raid_disks, level, layout);
562 if (dnum < 0) abort();
563 if (source[dnum] < 0 ||
564 lseek64(source[dnum], offsets[dnum]+offset, 0) < 0 ||
565 read(source[dnum], buf+disk * chunk_size, chunk_size)
568 fdisk[failed] = dnum;
569 fblock[failed] = disk;
573 if (failed == 0 || fblock[0] >= data_disks)
574 /* all data disks are good */
576 else if (failed == 1 || fblock[1] >= data_disks+1) {
577 /* one failed data disk and good parity */
578 char *bufs[data_disks];
579 for (i=0; i < data_disks; i++)
581 bufs[i] = buf + data_disks*chunk_size;
583 bufs[i] = buf + i*chunk_size;
/* rebuild the missing chunk by XOR of the survivors plus P */
585 xor_blocks(buf + fblock[0]*chunk_size,
586 bufs, data_disks, chunk_size);
587 } else if (failed > 2 || level != 6)
588 /* too much failure */
591 /* RAID6 computations needed. */
592 uint8_t *bufs[data_disks+4];
595 disk = geo_map(-1, start/chunk_size/data_disks,
596 raid_disks, level, layout);
597 qdisk = geo_map(-2, start/chunk_size/data_disks,
598 raid_disks, level, layout);
599 if (is_ddf(layout)) {
600 /* q over 'raid_disks' blocks, in device order.
601 * 'p' and 'q' get to be all zero
603 for (i = 0; i < raid_disks; i++)
605 for (i = 0; i < data_disks; i++) {
606 int dnum = geo_map(i,
607 start/chunk_size/data_disks,
608 raid_disks, level, layout);
610 /* i is the logical block number, so is index to 'buf'.
611 * dnum is physical disk number
612 * and thus the syndrome number.
615 bufs[snum] = (uint8_t*)buf + chunk_size * i;
617 syndrome_disks = raid_disks;
619 /* for md, q is over 'data_disks' blocks,
620 * starting immediately after 'q'
621 * Note that for the '_6' variety, the p block
622 * makes a hole that we need to be careful of.
626 for (j = 0; j < raid_disks; j++) {
627 int dnum = (qdisk + 1 + j) % raid_disks;
628 if (dnum == disk || dnum == qdisk)
630 for (i = 0; i < data_disks; i++)
632 start/chunk_size/data_disks,
633 raid_disks, level, layout) == dnum)
635 /* i is the logical block number, so is index to 'buf'.
636 * dnum is physical disk number
637 * snum is syndrome disk for which 0 is immediately after Q
639 bufs[snum] = (uint8_t*)buf + chunk_size * i;
648 syndrome_disks = data_disks;
651 /* Place P and Q blocks at end of bufs */
652 bufs[syndrome_disks] = (uint8_t*)buf + chunk_size * data_disks;
653 bufs[syndrome_disks+1] = (uint8_t*)buf + chunk_size * (data_disks+1);
655 if (fblock[1] == data_disks)
656 /* One data failed, and parity failed */
657 raid6_datap_recov(syndrome_disks+2, chunk_size,
660 /* Two data blocks failed, P,Q OK */
661 raid6_2data_recov(syndrome_disks+2, chunk_size,
662 fdisk[0], fdisk[1], bufs, 0);
666 for (i = 0; i < nwrites; i++)
667 if (write(dest[i], buf, len) != len)
670 /* build next stripe in buffer */
681 * A list of 'fds' of the active disks. Some may be '-1' for not-available.
682 * A geometry: raid_disks, chunk_size, level, layout
683 * An 'fd' to read from. It is already seeked to the right (Read) location.
684 * A start and length.
685 * The length must be a multiple of the stripe size.
687 * We build a full stripe in memory and then write it out.
688 * We assume that there are enough working devices.
/*
 * restore_stripes(): read linear data (from fd 'source' or buffer
 * 'src_buf'), lay it out into a full stripe, compute P (and Q for
 * RAID6), and write every chunk to its destination device/offset.
 * NOTE(review): this listing is elided; error-cleanup paths and some
 * statements are not visible.
 */
690 int restore_stripes(int *dest, unsigned long long *offsets,
691 int raid_disks, int chunk_size, int level, int layout,
692 int source, unsigned long long read_offset,
693 unsigned long long start, unsigned long long length,
697 char **stripes = xmalloc(raid_disks * sizeof(char*));
698 char **blocks = xmalloc(raid_disks * sizeof(char*));
702 int data_disks = raid_disks - (level == 0 ? 0 : level <= 5 ? 1 : 2);
/* stripe_buf must be page-aligned for O_DIRECT-style I/O */
704 if (posix_memalign((void**)&stripe_buf, 4096, raid_disks * chunk_size))
/* NOTE(review): duplicates ensure_zero_has_size() — could call it instead */
707 if (zero == NULL || chunk_size > zero_size) {
710 zero = xcalloc(1, chunk_size);
711 zero_size = chunk_size;
714 if (stripe_buf == NULL || stripes == NULL || blocks == NULL
719 for (i = 0; i < raid_disks; i++)
720 stripes[i] = stripe_buf + i * chunk_size;
722 unsigned int len = data_disks * chunk_size;
723 unsigned long long offset;
/* fill the data chunks of this stripe, in logical-block order */
730 for (i = 0; i < data_disks; i++) {
731 int disk = geo_map(i, start/chunk_size/data_disks,
732 raid_disks, level, layout);
733 if (src_buf == NULL) {
735 if (lseek64(source, read_offset, 0) !=
736 (off64_t)read_offset) {
742 chunk_size) != chunk_size) {
747 /* read from input buffer */
748 memcpy(stripes[disk],
749 src_buf + read_offset,
752 read_offset += chunk_size;
754 /* We have the data, now do the parity */
755 offset = (start/chunk_size/data_disks) * chunk_size;
759 disk = geo_map(-1, start/chunk_size/data_disks,
760 raid_disks, level, layout);
761 for (i = 0; i < data_disks; i++)
762 blocks[i] = stripes[(disk+1+i) % raid_disks];
763 xor_blocks(stripes[disk], blocks, data_disks, chunk_size);
766 disk = geo_map(-1, start/chunk_size/data_disks,
767 raid_disks, level, layout);
768 qdisk = geo_map(-2, start/chunk_size/data_disks,
769 raid_disks, level, layout);
770 if (is_ddf(layout)) {
771 /* q over 'raid_disks' blocks, in device order.
772 * 'p' and 'q' get to be all zero
774 for (i = 0; i < raid_disks; i++)
775 if (i == disk || i == qdisk)
776 blocks[i] = (char*)zero;
778 blocks[i] = stripes[i];
779 syndrome_disks = raid_disks;
781 /* for md, q is over 'data_disks' blocks,
782 * starting immediately after 'q'
784 for (i = 0; i < data_disks; i++)
785 blocks[i] = stripes[(qdisk+1+i) % raid_disks];
787 syndrome_disks = data_disks;
789 qsyndrome((uint8_t*)stripes[disk],
790 (uint8_t*)stripes[qdisk],
792 syndrome_disks, chunk_size);
/* write every chunk of the completed stripe to its device */
795 for (i=0; i < raid_disks ; i++)
798 offsets[i]+offset, 0) < 0) {
802 if (write(dest[i], stripes[i],
803 chunk_size) != chunk_size) {
/*
 * test_stripes(): read each stripe from the devices, recompute P and Q,
 * compare against the stored parity blocks, and report which disk looks
 * inconsistent (via raid6_check_disks).  Diagnostic tool — prints to
 * stdout rather than returning per-stripe detail.
 * NOTE(review): this listing is elided; loop boundaries, return value and
 * cleanup are not visible.
 */
822 int test_stripes(int *source, unsigned long long *offsets,
823 int raid_disks, int chunk_size, int level, int layout,
824 unsigned long long start, unsigned long long length)
826 /* ready the data and p (and q) blocks, and check we got them right */
827 char *stripe_buf = xmalloc(raid_disks * chunk_size);
828 char **stripes = xmalloc(raid_disks * sizeof(char*));
829 char **blocks = xmalloc(raid_disks * sizeof(char*));
830 uint8_t *p = xmalloc(chunk_size);
831 uint8_t *q = xmalloc(chunk_size);
/* unlike save/restore, only raid5/6 are handled here */
835 int data_disks = raid_disks - (level == 5 ? 1: 2);
840 for ( i = 0 ; i < raid_disks ; i++)
841 stripes[i] = stripe_buf + i * chunk_size;
846 for (i = 0 ; i < raid_disks ; i++) {
/* NOTE(review): lseek64/read return values are unchecked — a short or
 * failed read would silently test stale buffer contents. */
847 lseek64(source[i], offsets[i]+start, 0);
848 read(source[i], stripes[i], chunk_size);
850 for (i = 0 ; i < data_disks ; i++) {
851 int disk = geo_map(i, start/chunk_size, raid_disks,
853 blocks[i] = stripes[disk];
854 printf("%d->%d\n", i, disk);
858 qsyndrome(p, q, (uint8_t**)blocks, data_disks, chunk_size);
859 diskP = geo_map(-1, start/chunk_size, raid_disks,
861 if (memcmp(p, stripes[diskP], chunk_size) != 0) {
862 printf("P(%d) wrong at %llu\n", diskP,
865 diskQ = geo_map(-2, start/chunk_size, raid_disks,
867 if (memcmp(q, stripes[diskQ], chunk_size) != 0) {
868 printf("Q(%d) wrong at %llu\n", diskQ,
871 disk = raid6_check_disks(data_disks, start, chunk_size,
872 level, layout, diskP, diskQ,
875 printf("Possible failed disk: %d\n", disk);
878 printf("Failure detected, but disk unknown\n");
882 length -= chunk_size;
/*
 * getnum(): parse 'str' as a base-10 unsigned long long; on bad input
 * the elided tail presumably sets *err to the offending string — TODO
 * confirm against the full source.
 */
888 unsigned long long getnum(char *str, char **err)
891 unsigned long long rv = strtoull(str, &e, 10);
/* Program name used in diagnostics (expected by shared mdadm helpers). */
899 char const Name[] = "test_restripe";
900 int main(int argc, char *argv[])
902 /* save/restore file raid_disks chunk_size level layout start length devices...
909 unsigned long long *offsets;
910 int raid_disks, chunk_size, level, layout;
911 unsigned long long start, length;
916 fprintf(stderr, "Usage: test_stripe save/restore file raid_disks chunk_size level layout start length devices...\n");
919 if (strcmp(argv[1], "save")==0)
921 else if (strcmp(argv[1], "restore") == 0)
923 else if (strcmp(argv[1], "test") == 0)
926 fprintf(stderr, "test_stripe: must give 'save' or 'restore'.\n");
931 raid_disks = getnum(argv[3], &err);
932 chunk_size = getnum(argv[4], &err);
933 level = getnum(argv[5], &err);
934 layout = getnum(argv[6], &err);
935 start = getnum(argv[7], &err);
936 length = getnum(argv[8], &err);
938 fprintf(stderr, "test_stripe: Bad number: %s\n", err);
941 if (argc != raid_disks + 9) {
942 fprintf(stderr, "test_stripe: wrong number of devices: want %d found %d\n",
946 fds = xmalloc(raid_disks * sizeof(*fds));
947 offsets = xcalloc(raid_disks, sizeof(*offsets));
949 storefd = open(file, O_RDWR);
952 fprintf(stderr, "test_stripe: could not open %s.\n", file);
955 for (i=0; i<raid_disks; i++) {
957 p = strchr(argv[9+i], ':');
961 offsets[i] = atoll(p) * 512;
964 fds[i] = open(argv[9+i], O_RDWR);
967 fprintf(stderr,"test_stripe: cannot open %s.\n", argv[9+i]);
972 buf = xmalloc(raid_disks * chunk_size);
975 int rv = save_stripes(fds, offsets,
976 raid_disks, chunk_size, level, layout,
981 "test_stripe: save_stripes returned %d\n", rv);
984 } else if (save == 2) {
985 int rv = test_stripes(fds, offsets,
986 raid_disks, chunk_size, level, layout,
990 "test_stripe: test_stripes returned %d\n", rv);
994 int rv = restore_stripes(fds, offsets,
995 raid_disks, chunk_size, level, layout,
997 start, length, NULL);
1000 "test_stripe: restore_stripes returned %d\n",