* we cannot load the segusage block for each snapshot and then parse them
* in parallel. Instead we allocate space to store a max usage and
* merge each block one at a time into that max. We then combine the
- * max with the youth to get a 32bit weight... I wonder if that is good.
+ * max with the youth to get a 64bit weight... I wonder if that is good.
*
*/
INIT_HLIST_NODE(&new->hash);
dv = fs->devs + devnum;
addr = LAFSI(fs->ss[ssnum].root)->md.fs.usagetable * dv->tablesize;
- addr += segnum >> (fs->blocksize_bits-1);
+ addr += segnum >> (fs->blocksize_bits - USAGE_SHIFT);
new->ssblk = lafs_get_block(dv->segsum, addr, NULL,
GFP_KERNEL,
MKREF(ss));
if (ssnum == 0)
new->youthblk = lafs_get_block(dv->segsum,
- segnum >> (fs->blocksize_bits-1),
+ segnum >> (fs->blocksize_bits
+ - YOUTH_SHIFT),
NULL,
GFP_KERNEL,
MKREF(ssyouth));
if (!in_phase)
atomic_add(diff, &ss->delayed);
else {
- u16 *b, *p;
+ u32 *b, *p;
b = map_dblock(ss->ssblk);
spin_lock(&fs->stable_lock);
- p = &b[ss->segnum & ((fs->blocksize-1)>>1)];
- //BUG_ON(diff < 0 && le16_to_cpu(*p) < -diff);
- if (diff < 0 && le16_to_cpu(*p) < -diff) {
- printk("diff=%d p=%d segnum=%d\n", diff, le16_to_cpu(*p),
+ p = &b[ss->segnum & ((fs->blocksize-1)>>USAGE_SHIFT)];
+ //BUG_ON(diff < 0 && le32_to_cpu(*p) < -diff);
+ if (diff < 0 && le32_to_cpu(*p) < -diff) {
+ printk("diff=%d p=%d segnum=%d\n", diff, le32_to_cpu(*p),
ss->segnum);
BUG();
}
- *p = cpu_to_le16(le16_to_cpu(*p) + diff);
+ *p = cpu_to_le32(le32_to_cpu(*p) + diff);
spin_unlock(&fs->stable_lock);
unmap_dblock(ss->ssblk, b);
lafs_dirty_dblock(ss->ssblk);
y = decay_undo(y);
ybuf = map_dblock(ss->youthblk);
youthp = ybuf + (ss->segnum & ((1 << (fs->blocksize_bits
- - 1)) - 1));
+ - YOUTH_SHIFT)) - 1));
if (le16_to_cpu(*youthp) < 8) {
*youthp = cpu_to_le16(y);
fs->youth_next++;
* again and remove it properly so it can become cleanable later.
*/
-#define SCORE_MAX 0xFFFFFFFC /* Maximum normal score */
-#define SCORE_ACTIVE 0xFFFFFFFD /* This segment is being written to */
-#define SCORE_CLEANING 0xFFFFFFFE /* This segment in being cleaned */
-#define SCORE_DEAD 0xFFFFFFFF /* This segment is to be removed */
+#define SCORE_MAX 0xFFFFFFFFFFFFFFFCULL /* Maximum normal score */
+#define SCORE_ACTIVE 0xFFFFFFFFFFFFFFFDULL /* This segment is being written to */
+#define SCORE_CLEANING 0xFFFFFFFFFFFFFFFEULL /* This segment is being cleaned */
+#define SCORE_DEAD 0xFFFFFFFFFFFFFFFFULL /* This segment is to be removed */
struct segstat {
u16 next;
u16 dev;
u32 segment;
- u32 score;
- u16 usage;
+ u64 score;
+ u32 usage;
u16 skip[0]; /* or larger... */
};
}
static struct segstat *seg_add_new(struct segtracker *st, struct slist *which, int atend,
- int dev, u32 seg, int score, int usage,
+ int dev, u32 seg, long long score, int usage,
u16 *where[SEG_NUM_HEIGHTS])
{
int ssn;
fs->free_blocks += fs->devs[ss->dev].segment_size;
spin_unlock(&fs->lock);
db = lafs_get_block(fs->devs[ss->dev].segsum,
- ss->segment >> (fs->blocksize_bits-1),
+ ss->segment >> (fs->blocksize_bits
+ - YOUTH_SHIFT),
NULL, GFP_KERNEL | __GFP_NOFAIL,
MKREF(cleanfree));
err = lafs_read_block(db);
if (err == 0) {
u16 *b = map_dblock(db);
spin_lock(&fs->stable_lock);
- b[ss->segment & ((fs->blocksize-1)>>1)] = 0;
+ b[ss->segment & ((fs->blocksize-1)>>YOUTH_SHIFT)] = 0;
spin_unlock(&fs->stable_lock);
unmap_dblock(db, b);
lafs_dirty_dblock(db);
for (ssn = st->cleanable.first; ssn != 0xffff; ssn = ss->next) {
ss = segfollow(st, ssn);
- printk("%3d: %3d/%-4d %5d %d\n",
+ printk("%3d: %3d/%-4d %5d %lld\n",
i, ss->dev,
ss->segment,
ss->usage,
for (ssn = st->cleanable.first; ssn != 0xffff; ssn = ss->next) {
ss = segfollow(st, ssn);
- printk("%3d: %3d/%-4d %5d %d\n",
+ printk("%3d: %3d/%-4d %5d %lld\n",
i, ss->dev,
ss->segment,
ss->usage,
for (ssn = st->free.first; ssn != 0xffff; ssn = ss->next) {
ss = segfollow(st, ssn);
- printk("%3d: %3d/%-4d %5d %d\n",
+ printk("%3d: %3d/%-4d %5d %lld\n",
ssn, ss->dev,
ss->segment,
ss->usage,
for (ssn = st->clean.first; ssn != 0xffff; ssn = ss->next) {
ss = segfollow(st, ssn);
- printk("%3d: %3d/%-4d %5d %d\n",
+ printk("%3d: %3d/%-4d %5d %lld\n",
ssn, ss->dev,
ss->segment,
ss->usage,
*dev = ss->dev;
*seg = ss->segment;
- dprintk("SEG: cleanable %d/%d score=%d usage=%d\n",
- ss->dev, ss->segment, ss->score, ss->usage);
+ dprintk("SEG: cleanable %d/%d score=%llu usage=%d\n",
+ ss->dev, ss->segment, (unsigned long long)ss->score, ss->usage);
ss->score = SCORE_CLEANING;
if (ss->usage == 0) {
}
static int add_cleanable(struct fs *fs, unsigned int dev, u32 seg,
- u16 youth, u16 usage)
+ u16 youth, u32 usage)
{
- u32 score;
+ u64 score;
struct segstat *ss;
u32 segsize;
u16 *where[SEG_NUM_HEIGHTS];
if (test_bit(EmergencyClean, &fs->fsstate))
score = usage;
- else
- /* 0x10000 is to ensure this score is always
+ else {
+ /* 0x100000000 is to ensure this score is always
* more than the above score */
- score = youth * usage / segsize + 0x10000;
+ score = (u64)youth * usage;
+ do_div(score, segsize);
+ score += 0x100000000;
+ }
spin_lock(&fs->lock);
if (score > SCORE_MAX)
return 1;
}
-static void merge_usage(struct fs *fs, u16 *d)
+static void merge_usage(struct fs *fs, u32 *d)
{
- u16 *u = fs->scan.free_usages;
- int segperblk = fs->blocksize / 2;
+ u32 *u = fs->scan.free_usages;
+ int segperblk = fs->blocksize >> USAGE_SHIFT;
int i;
for (i = 0; i < segperblk; i++)
- if (le16_to_cpu(d[i]) > le16_to_cpu(u[i]))
+ if (le32_to_cpu(d[i]) > le32_to_cpu(u[i]))
u[i] = d[i];
}
unsigned long lafs_scan_seg(struct fs *fs)
{
+	/* FIXME this comment is very outdated */
/* Process one block of youth or segment-usage data. We
* collect free segments (youth==0) into a table that is kept
* sorted to ensure against duplicates. It is treated like a
*/
int dev = fs->scan.free_dev;
int block = fs->scan.free_block + 1;
+ int youthblock = block >> (USAGE_SHIFT - YOUTH_SHIFT);
int err;
while (dev < 0 ||
}
}
if (fs->scan.youth_db)
- if (fs->scan.youth_db->b.fileaddr != block ||
+ if (fs->scan.youth_db->b.fileaddr != youthblock ||
dev < 0 ||
fs->scan.youth_db->b.inode != fs->devs[dev].segsum) {
putdref(fs->scan.youth_db, MKREF(youth_scan));
if (fs->scan.youth_db == NULL)
fs->scan.youth_db =
lafs_get_block(fs->devs[dev].segsum,
- block,
+ youthblock,
NULL, GFP_KERNEL, MKREF(youth_scan));
if (!fs->scan.youth_db) {
printk("EEEEEKKKKK get_block failed\n");
spin_lock(&fs->lock);
fs->scan.free_block = block;
fs->scan.free_dev = dev;
- if (!err && fs->scan.do_decay) {
+ if (!err && fs->scan.do_decay &&
+ youthblock << (USAGE_SHIFT - YOUTH_SHIFT) == block) {
u16 *yp = map_dblock(fs->scan.youth_db);
int i;
- int segperblk = fs->blocksize / 2;
+ int segperblk = fs->blocksize >> YOUTH_SHIFT;
for (i = 0 ; i < segperblk ; i++) {
int y = le16_to_cpu(yp[i]);
*/
struct datablock *db;
char *d;
- u16 *yp;
+ u16 *yp, *yp0;
int i;
int firstseg;
- int segperblk = fs->blocksize / 2;
+ int segperblk = fs->blocksize >> USAGE_SHIFT;
int segments = segperblk;
int segcount;
int blks;
segments = segcount % segperblk;
firstseg = fs->scan.free_block * segperblk;
- yp = map_dblock(fs->scan.youth_db);
+ yp0 = yp = map_dblock(fs->scan.youth_db);
+ yp += (fs->scan.free_block -
+ (fs->scan.youth_db->b.fileaddr << (USAGE_SHIFT - YOUTH_SHIFT)))
+ * segperblk;
for (i = 0; i < segments ; i++)
if (yp[i] == cpu_to_le16(0)) {
if (fs->scan.first_free_pass) {
fs->devs[fs->scan.free_dev]
.segment_size /*- 1*/;
}
- unmap_dblock(fs->scan.youth_db, yp);
+ unmap_dblock(fs->scan.youth_db, yp0);
fs->scan.usage0_db = db;
fs->scan.free_stage = 2;
while (fs->scan.free_stage > 1 &&
fs->scan.free_stage < fs->maxsnapshot + 1) {
struct datablock *db;
- u16 *d;
+ u32 *d;
if (fs->ss[fs->scan.free_stage-1].root == NULL) {
fs->scan.free_stage++;
* cleanable segments now
*/
u16 *yp = map_dblock(fs->scan.youth_db);
- u16 *up = fs->scan.free_usages;
+ u16 *yp0 = yp;
+ u32 *up = fs->scan.free_usages;
int i;
- int segperblk = fs->blocksize / 2;
+ int segperblk = fs->blocksize >> USAGE_SHIFT;
int segments = segperblk;
int segcount = fs->devs[fs->scan.free_dev].segment_count;
int blks = segcount / segments;
if (fs->scan.free_block == blks)
segments = segcount % segperblk;
+ yp += (fs->scan.free_block -
+ (fs->scan.youth_db->b.fileaddr << (USAGE_SHIFT - YOUTH_SHIFT)))
+ * segperblk;
for (i = 0; i < segments; i++)
if (add_cleanable(fs, fs->scan.free_dev,
i + fs->scan.free_block * segperblk,
(void)getdref(fs->scan.usage0_db, MKREF(intable));
}
- unmap_dblock(fs->scan.youth_db, yp);
+ unmap_dblock(fs->scan.youth_db, yp0);
putdref(fs->scan.usage0_db, MKREF(usage0));
fs->scan.usage0_db = NULL;
fs->scan.free_stage = 0;