test_bit(B_Pinned, &b[i].b.flags) ||
test_bit(B_Alloc, &b[i].b.flags)
/* NOTE: if we find an Uninc is set when we
- * need to invalidate the page, then we
+ * need to invalidate the page, then we
* should be waiting for all pages to be gone
* properly before allowing truncate to complete.
* The whole file doesn't need to be truncated yet,
 * that can continue lazily, but all the pages must
* be incorporated. Maybe we just need to
- * wait for a checkpoint here.??
+ * wait for a checkpoint here.??
*/
|| test_bit(B_Uninc, &b[i].b.flags)
/* || atomic_read(&b[i].b.refcnt) */
list_del_init(&b[i].b.lru);
list_del_init(&b[i].b.peers);
(void)getdref(&b[i]);
- /* When !PagePrivate(page), && refcnt, we hold a ref on the
+ /* When !PagePrivate(page), && refcnt, we hold a ref on the
 * first block which holds a ref on the page.
* When ref on firstblock with !PagePrivate(page) becomes zero,
* we free
ClearPagePrivate(page);
spin_unlock(&mapping->private_lock);
-
+
for (i=0; i<(1<<bits); i++)
if (b[i].b.parent)
putref(&b[i].b.parent->b);
return 1;
}
-
/* Pinning and dirtying of datablocks.
* Before a modification of a datablock can be allowed we must be sure there
* will be room to write it out. Thus suitable pre-allocations are required.
 * We need to:
* - pin parents and inode
* - preallocate as needed
- * - reference the old segment
+ * - reference the old segment
* - update flags and pointers.
*/
int err;
/* We don't pin a datablock of an inode if there is an
* InoIdx block. We pin the InoIdx block instead.
* They might both be pinned at the same time, but
- * only when the index block has swapped phase and the
+ * only when the index block has swapped phase and the
* data block is waiting to be written.
*/
if (LAFSI(b->b.inode)->type == TypeInodeFile &&
BUG_ON(LAFSI(b->b.inode)->depth != LAFSI(b->b.inode)->iblock->depth);
lafs_clear_index(LAFSI(b->b.inode)->iblock);
clear_bit(B_PhysValid, &b->b.flags);
-
#endif
}
#if 0
* few dozen segments - experimentation and possible tuning needed.
*
* Checkpoint creation is not needed for any data integrity
- * requirements. It is not even needed at unmount, but is
+ * requirements. It is not even needed at unmount, but is
* done at that point anyway to minimise startup time.
*
* Taking a checkpoint involves:
* - marking the end of the checkpoint.
*
* The synchronisation and bit flipping is from anywhere that decides
- * a checkpoint is needed. The flush and finish-up is handled by
+ * a checkpoint is needed. The flush and finish-up is handled by
* a thread that is created for the purpose.
*/
#ifdef DUMP
extern struct fs *dfs;
-
char *strflags(struct block *b)
{
static char ans[200];
char *strblk(struct block *b)
{
static char ans[200];
-
+
if (!b) return "(NULL block)";
if (test_bit(B_PhysValid, &b->flags))
- sprintf(ans, "[%p]%lu/%lu(%llu)r%d:%s", b,b->inode->i_ino, b->fileaddr,
+ sprintf(ans, "[%p]%lu/%lu(%llu)r%d:%s", b,b->inode->i_ino, b->fileaddr,
b->physaddr, atomic_read(&b->refcnt), strflags(b));
else
- sprintf(ans, "[%p]%lu/%lu(NoPhysAddr)r%d:%s", b,b->inode->i_ino, b->fileaddr,
+ sprintf(ans, "[%p]%lu/%lu(NoPhysAddr)r%d:%s", b,b->inode->i_ino, b->fileaddr,
atomic_read(&b->refcnt), strflags(b));
if (test_bit(B_Pinned, &b->flags)&&
test_bit(B_Index, &b->flags))
}
#endif
-
static int prepare_checkpoint(struct fs *fs)
{
int oldphase;
if (b)
/* the list counted a reference. Now we hold it */
list_del_init(&b->lru);
-
spin_unlock(&fs->lock);
return b;
fs->cleaner.cleaning = 0;
/* FIXME should I wake someone up? */
/* FIXME might I now be racing with unmount and module unload??? */
-
+
fs->checkpoint_youth = youth;
wake_up(&fs->phase_wait);
}
return HZ/10; /* FIXME that is gross ... is it needed? */
/* OK, time for some work. */
printk("############################ start checkpoint\n");
-// printk("1");
+// printk("1");
y = prepare_checkpoint(fs);
-// printk("2");
+// printk("2");
do_checkpoint(fs);
-// printk("3");
+// printk("3");
finish_checkpoint(fs, y);
-// printk("\n");
+// printk("\n");
printk("############################ finish checkpoint\n");
return MAX_SCHEDULE_TIMEOUT;
return cp;
}
-
void lafs_checkpoint_lock(struct fs *fs)
{
spin_lock(&fs->lock);
}
//printk("o");
if (IS_ERR(ino)) {
- /* FIXME check that this is -EAGAIN
+ /* FIXME check that this is -EAGAIN
* FIXME should have lafs_iget_fs return the
 * ino anyway, but lafs_find_block_async
* doesn't try until ino is uptodate.
 * is below the fraction of free space that is not clean.
* i.e. if T is total space, C is clean space, F is free space,
* then clean when C/T < (F-C)/F
- * Avoiding division, this is
+ * Avoiding division, this is
* C * F < T * (F - C)
*
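 * For example, with T=1000, F=300 and C=50 blocks: C/T = 0.05 is well
 * below (F-C)/F = 250/300, and equivalently C*F = 15000 is less than
 * T*(F-C) = 250000, so cleaning should proceed.
 *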
* T we know from the size of the devices
* Released under the GPL, version 2
*/
-
/*
* Blocks to be written to a cluster need to be sorted so as to minimise
* fragmentation and to keep the cluster head small.
* at each level which is before-or-at the found
* block. This can be used for easy insertion.
*/
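/* A stand-alone illustration of the skip-point idea described above:
 * walk down from the top level recording, at each level, the last node
 * whose key is at-or-before the target; those recorded nodes are then the
 * splice points for an insert.  The structure and names below are
 * illustrative only, not the LaFS skippoint code.
 */
#define SKETCH_MAX_HEIGHT 8

struct sketch_node {
        unsigned long key;                      /* e.g. a file address */
        struct sketch_node *next[SKETCH_MAX_HEIGHT];
};

static void sketch_find(struct sketch_node *head, unsigned long key,
                        struct sketch_node *before[SKETCH_MAX_HEIGHT])
{
        struct sketch_node *n = head;
        int level;

        for (level = SKETCH_MAX_HEIGHT - 1; level >= 0; level--) {
                while (n->next[level] && n->next[level]->key <= key)
                        n = n->next[level];
                before[level] = n;              /* last node at-or-before 'key' */
        }
}

static void sketch_insert(struct sketch_node *head, struct sketch_node *new,
                          int height)
{
        struct sketch_node *before[SKETCH_MAX_HEIGHT];
        int level;

        sketch_find(head, new->key, before);
        for (level = 0; level < height; level++) {
                new->next[level] = before[level]->next[level];
                before[level]->next[level] = new;
        }
}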
-
+
int level;
struct block *b, *next;
for (level = SKIP_MAX_HEIGHT-1; level >= 0 ; level--) {
int level;
struct block *b;
int cmpbefore, cmpafter;
- unsigned long rand[DIV_ROUND_UP(SKIP_MAX_HEIGHT * 2 / 8 + 1,
+ unsigned long rand[DIV_ROUND_UP(SKIP_MAX_HEIGHT * 2 / 8 + 1,
sizeof(unsigned long))];
int height;
* the segment
*/
-
static int seg_remainder(struct fs *fs, struct segpos *seg)
{
/* return the number of blocks from the start of segpos to the
addr += seg->num * dv->segment_stride;
addr += dv->start;
-
/* now step forward in column or table or seg */
seg->col++;
if (seg->col >= dv->width) {
test_bit(B_Phase1, &b->flags)
) {
/* Don't allocate yet, until index block is ready */
-
+
spin_unlock(&dblk(b)->my_inode->i_data.private_lock);
return 0;
}
spin_unlock(&dblk(b)->my_inode->i_data.private_lock);
}
/* and if this is the indexblock, we schedule a write of the
- * datablock
+ * datablock
*/
if (test_bit(B_InoIdx, &b->flags)) {
/* FIXME should I inode_fillblock here?? */
if (credits > 1)
lafs_space_return(fs, credits-1);
BUG_ON(credits < 1);
- }
+ }
if (!test_and_clear_bit(B_UnincCredit, &b->flags)) {
printk("no uninc credit %s\n", strblk(b));
BUG();
lafs_space_return(fs,1);
if (test_and_set_bit(B_ICredit, &b->flags))
lafs_space_return(fs,1);
-
+
if (cnum == 0)
lafs_dirty_dblock(dblk(b));
else {
- /* FIXME this code should be in clean.c
+ /* FIXME this code should be in clean.c
* it is copied from lafs_dirty_dblock() */
if (!test_and_set_bit(B_Realloc, &b->flags)) {
if (!test_and_clear_bit(B_Credit, &b->flags))
lafs_iounlock_block(b, 0);
return wc->cluster_seq;
}
- if (!test_bit(B_Valid, &b->flags))
+ if (!test_bit(B_Valid, &b->flags))
printk("Not Valid in allocate %s\n", strblk(b));
BUG_ON(!test_bit(B_Valid, &b->flags));
} else
desc_start->block_bytes = cpu_to_le32(1<<blkbits);
}
-
+
}
/*------------------------------------------------------------------------
return cpu_to_le32(csum);
}
-
/*------------------------------------------------------------------------
* Cluster flushing.
* Once we have a suitable number of blocks and updates in a writecluster
if (!test_bit(B_Index, &b->flags)) {
struct page *page = dblk(b)->page;
struct datablock *db = (struct datablock *)page->private;
- int blks = 1 << (PAGE_SHIFT -
+ int blks = 1 << (PAGE_SHIFT -
page->mapping->host->i_sb->s_blocksize_bits);
if (PageWriteback(page) && db) {
int j;
lafs_clusters_done(fs);
}
-
static void cluster_flush(struct fs *fs, int cnum)
{
struct wc *wc = &fs->wc[cnum];
if (wc->remaining < 2)
new_segment(fs, cnum);
-
/* Fill in the cluster header */
strncpy(wc->chead->idtag, "LaFSHead", 8);
if (cnum)
wait_event(wc->pending_wait,
atomic_read(&wc->pending_cnt[which])==1);
-
lafs_clusters_done(fs);
dprintk("cluster_flush pre-bug pending_next=%d cnt=%d\n",
wc->pending_next, atomic_read(&wc->pending_cnt[wc->pending_next]));
}
}
-
/* The end_io function for writes to a cluster is one
* of 4 depending on which in the circular list the
* block is for.
return cluster_endio_data_3;
}
-
int lafs_cluster_init(struct fs *fs, int cnum, u64 addr, u64 prev, u64 seq)
{
int blocksize = fs->prime_sb->s_blocksize;
/* Need to write this block out and wait until
* it has been written, so that we can update it
* without risking corruption to previous snapshot.
- *
+ *
*/
}
return hash + offset;
}
-
/* dpaddr assumes that 'psz' (piece size) is valid when called */
#define dpaddr(_block, _piece) ((struct dirpiece*)((_block) + ((_piece)<<psz)))
#define dlpaddr(_block, _piece) ((struct dirleafpiece*)((_block) + ((_piece)<<psz)))
dp->longer = Neither;
dp->length = len;
dp->type = type;
-
+
memcpy(dp->name, name, len);
if (chain_offset <= 1)
}
/* NOTE: we want the last piece, not the next free piece, so we don't add
- * (1<<psz) into this sum
+ * (1<<psz) into this sum
*/
dh->lastpiece = pnum + ((offsetof(struct dirpiece, name)+len-1)>> psz);
dh->freepieces = 255 - dh->lastpiece;
}
-
-
-
static inline int dir_rotate_2(char *block, int psz, u8 *treep, int dir)
{
unsigned int B,C,D,E;
dp->longer = Neither;
else {
/* need a rotation */
- second =
+ second =
dpaddr(block, dp->next[1-dir]);
if (second->longer == dir) {
struct dirpiece *third =
dp->longer = 1-dir;
second = dpaddr(block, *topp);
second->longer = dir;
- } else
+ } else
dir_rotate_2(block, psz, topp, 1-dir);
if (piece == targetn) {
second = dpaddr(block, *topp);
st++;
}
- /* Ok, rebalance all done, now swap *targetp for *thisp and
+ /* Ok, rebalance all done, now swap *targetp for *thisp and
* delete
*/
-
+
piece = *thisp;
*targetp = piece;
dp = dpaddr(block, piece);
/* now second can be destroyed */
second->target = 0;
- space = second->length + (second->chain_info < 2
+ space = second->length + (second->chain_info < 2
? 0 : second->chain_info == 2 ? 1 : 4);
space += offsetof(struct dirpiece, name);
space = DIV_ROUND_UP(space, 1<<psz);
}
dp->next[0] = tail;
dh->root = piece;
-}
+}
struct dir_ent *
lafs_dir_extract(char *block, int psz, struct dir_ent *de, int pnum,
dp->type = de->type;
}
-void lafs_dir_split(char *orig, int psz, char *new1, char *new2,
+void lafs_dir_split(char *orig, int psz, char *new1, char *new2,
const char *name, u32 target, int type, u32 *newhash,
u32 seed, u32 hash, int chainoffset)
{
if (type == 0 || (first && hval < hash)) {
/* first is the preferred candidate */
if (!lafs_dir_add_ent(new1, psz, dp->name,
- dp->length,
+ dp->length,
le32_to_cpu(dp->target),
dp->type, seed, hval, offset))
full1 = 1;
if (first)
lafs_dir_init_block(new, psz, dp->name,
dp->length,
- le32_to_cpu(dp->target),
+ le32_to_cpu(dp->target),
dp->type, offset);
else
lafs_dir_add_ent(new, psz, dp->name, dp->length,
{
/* walk around the tree, and BUG if we ever get a depth > 255 */
struct dirheader *dh = (struct dirheader*)block;
-
+
if (pnum == -1)
pnum = dh->root;
if (depth <= 0) return 1;
return (dh->lastpiece+1) << psz;
}
-
-
/* Testing code here - no new important functionality */
-#ifndef MAIN
+#ifndef MAIN
static void xprintk(char *block, int psz, char *s, int a, int b, int c, int d)
{
dir_print(block, psz);
BUG();
}
-
+
static int dir_check_depth(char *block, int psz, int p, int depth)
{
struct dirpiece *dp = dpaddr(block,p);
xprintk(block,psz, "... %d - b=%d f=%d lgr=%d\n", p, b, f, dp->longer);
return (b>f?b:f)+1;
}
-
+
static void dir_check_balance(char *block, int psz)
{
struct dirheader *dh = (struct dirheader*) block;
#ifdef MAIN
-
int noise = 0;
int main(int argc, char **argv)
{
int psz = blenshift - 8;
int arg = 2;
char nm[256];
-
+
lafs_dir_init_block(block, psz, argv[1], 0, 42, 3, 0);
while (arg < argc-1) {
if (argv[arg][0] != '-')
hash = lafs_hash_name(seed, nlen, name);
-
while(1) {
char *buf;
bn = hash+1;
*
 * The whole process uses a dirop_handle to store various aspects
* of state that might need to be unwound or committed.
- * A compound operation such as rename may included several
+ * A compound operation such as rename may include several
* simple operations such as delete + create. In that case there
* will be a separate dirop_handle for each simple operation.
* There will only be one create
* commit_create finalises the create and cannot fail.
*/
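/* A stand-alone sketch of the prepare/commit pattern described above:
 * every simple operation gets its own handle, all prepares run first and
 * may fail, and once the compound operation commits, none of the commit
 * steps is allowed to fail.  The handle type and helpers are illustrative
 * stand-ins, not the LaFS dirop API.
 */
struct sketch_handle { int prepared; };

static int sketch_prepare(struct sketch_handle *h) { h->prepared = 1; return 0; }
static void sketch_abort(struct sketch_handle *h) { h->prepared = 0; }
static void sketch_commit(struct sketch_handle *h) { h->prepared = 0; }

/* A compound operation (think "rename") built from two simple ones. */
static int sketch_compound(void)
{
        struct sketch_handle del = {0}, create = {0};
        int err;

        err = sketch_prepare(&del);
        if (!err)
                err = sketch_prepare(&create);
        if (err) {
                if (del.prepared)
                        sketch_abort(&del);
                if (create.prepared)
                        sketch_abort(&create);
                return err;
        }
        sketch_commit(&del);            /* commits must not fail */
        sketch_commit(&create);
        return 0;
}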
-
static int dir_create_prepare(struct fs *fs, struct inode *dir,
const char *name, int nlen,
u32 inum, int type,
if (rv < 0)
return -EEXIST;
if (rv == 1) {
- doh->hash = hash;
+ doh->hash = hash;
doh->chainoffset = chainoffset;
return 0;
}
memcpy(buf, tmp, blocksize);
unmap_dblock(dirblk, buf);
kfree(tmp);
- doh->hash = hash;
+ doh->hash = hash;
doh->chainoffset = chainoffset;
return 0;
}
}
static void
-dir_delete_commit(struct dirop_handle *doh,
+dir_delete_commit(struct dirop_handle *doh,
struct fs *fs, struct inode *dir,
const char *name, int nlen)
{
if (doh->dirent_block)
err = lafs_orphan_pin(&doh->oi, doh->dirent_block, 1);
if (err) printk("2 err=%d\n", err);
-
+
return err;
}
}
static void dir_log_commit(struct update_handle *uh,
- struct fs *fs, struct inode *dir,
+ struct fs *fs, struct inode *dir,
struct qstr *name, u32 target,
int operation, u32 *handle)
{
*(u32*)mb = cpu_to_le32(target);
lafs_cluster_update_commit_buf(uh, fs, dir, han, operation,
- 4+name->len, mb,
+ 4+name->len, mb,
name->len, name->name);
}
LAFSI(ino)->md.file.parent = dir->i_ino;
lafs_dirty_inode(ino);
dir_log_commit(&uh, fs, dir, &de->d_name, ino->i_ino, DIROP_LINK, NULL);
- dir_create_commit(&doh, fs, dir, de->d_name.name, de->d_name.len,
+ dir_create_commit(&doh, fs, dir, de->d_name.name, de->d_name.len,
ino->i_ino, DT_REG);
lafs_checkpoint_unlock(fs);
d_instantiate(de, ino); /* FIXME do I need to iput?? */
struct datablock *inodb;
int err;
- dprintk("enter unlink: refcnt = %d\n",
+ dprintk("enter unlink: refcnt = %d\n",
atomic_read(&LAFSI(inode)->dblock->b.refcnt));
err = dir_delete_prepare(fs, dir, de->d_name.name, de->d_name.len, &doh);
iput(ino);
return -ENOMEM;
}
-
+
err = dir_create_prepare(fs, dir, de->d_name.name, de->d_name.len,
ino->i_ino, DT_LNK, &doh);
err = dir_log_prepare(&uh, fs, &de->d_name) ?: err;
ino = lafs_new_inode(fs, dir, TypeDir, 0, mode, &inodb);
if (IS_ERR(ino))
return PTR_ERR(ino);
-
+
err = dir_create_prepare(fs, dir, de->d_name.name, de->d_name.len,
ino->i_ino, DT_DIR, &doh);
err = dir_log_prepare(&uh, fs, &de->d_name) ?: err;
mode_to_dt(old_inode->i_mode),
&new_doh) ?: err;
-
if (err)
goto abort;
/*--------------------------------------------------------------------
* Directory Orphan handling.
- *
+ *
* blocks in a directory file that are 'orphans' have recently had a deletion
* and may need:
* - to be punched as a hole if empty
 * - to have 'deleted' entries purged if they are freeable
* - to schedule next block for orphan handling if that might be appropriate.
- *
- *
- * All pending merges need to be linked to the inode.
+ *
+ *
+ * All pending merges need to be linked to the inode.
*
* Specifically:
* Lock the directory.
unmap_dblock(b2, buf2);
putdref(b2);
err = lafs_pin_dblock(b);
- // FIXME what about EAGAIN??
+ // FIXME what about EAGAIN??
if (err)
goto abort;
buf = map_dblock(b);
dprintk("read page %p for %d blocks\n", page, n);
for (i=0; i < n; i++) {
- struct datablock *b = lafs_get_block(ino, i, page,
+ struct datablock *b = lafs_get_block(ino, i, page,
GFP_KERNEL);
if (!b) {
err = -ENOMEM;
for (i = first + 1 ; i <= last ; i++)
lafs_get_block(ino, i, page, GFP_KERNEL);
-
if (start != (first << bits)) {
err = lafs_find_block(fb, 0);
if (!err)
#if 0
for (i=0; i < (1<<bits); i++) {
- struct datablock *b = lafs_get_block(ino, i, page,
+ struct datablock *b = lafs_get_block(ino, i, page,
GFP_KERNEL);
if (!b)
continue;
.write = do_sync_write,
.aio_read = generic_file_aio_read,
.aio_write = generic_file_aio_write,
-/* .ioctl = ext3_ioctl,*/
+/* .ioctl = ext3_ioctl,*/
.mmap = generic_file_mmap,
.open = generic_file_open,
/* .release = ext3_release_file,*/
* Consider gang-freeing for adding to head of lru lists.
*/
-
/* FIXME these should be configured at runtime based on memory */
#define HASHBITS 10
static struct hlist_head hash_table[1<<HASHBITS];
spin_unlock(&lafs_hash_lock);
}
-
-
/* adopt blk into parent if possible.
*/
static void
spin_unlock(&as->private_lock);
}
-
static int __must_check
find_block(struct datablock *b, int adopt, int async);
int oldphase = !!test_bit(B_Phase1, &b->flags);
struct block *ulist;
- dprintk("FLIP %d/%d %d->%d\n", (int)b->inode->i_ino,
+ dprintk("FLIP %d/%d %d->%d\n", (int)b->inode->i_ino,
(int)b->fileaddr, oldphase, fs->phase);
if (b->inode->i_ino == 0 && b->fileaddr == 0)
- printk("FLIP %d/%d %d->%d\n", (int)b->inode->i_ino,
+ printk("FLIP %d/%d %d->%d\n", (int)b->inode->i_ino,
(int)b->fileaddr, oldphase, fs->phase);
BUG_ON(!test_bit(B_Pinned, &b->flags));
return;
}
-
if (! test_bit(B_InoIdx, &b->flags)) {
struct inode *ino = b->inode;
spin_lock(&ino->i_mapping->private_lock);
lai->md.fs.pblocks_used = 0;
}
-
if (oldphase)
clear_bit(B_Phase1, &b->flags);
else
/* FIXME lock this */
ulist = ib->uninc_next;
ib->uninc_next = NULL;
-
+
while (ulist) {
struct block *b2 = ulist;
ulist = b2->chain;
* and that have no pinned children
* The parent link is only needed if this block has a refcount or one
* of B_Pinned, B_Dirty, B_Alloc, B_Uninc
- * B_Lock is only needed if one of
+ * B_Lock is only needed if one of
* B_Dirty, B_Alloc
* or refcount
* though a refcount on a pinned datablock is only significant
* a phase change can (and should) happen for index blocks
* in the 'other' phase that are
* not Dirty, not Alloc, pincnt[oldphase]==0, uninc-table empty
- *
+ *
*/
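/* Purely illustrative restatement of the parent-link rule above; this
 * helper is not part of LaFS, it just turns the comment into code using
 * the flag names defined in this tree.
 */
static inline int sketch_needs_parent(struct block *b)
{
        return atomic_read(&b->refcnt) ||
                test_bit(B_Pinned, &b->flags) ||
                test_bit(B_Dirty, &b->flags) ||
                test_bit(B_Alloc, &b->flags) ||
                test_bit(B_Uninc, &b->flags);
}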
void lafs_refile(struct block *b, int dec)
{
fs = fs_from_inode(b->inode);
/* sanity tests.
- * 1/ make sure pincnt is right
+ * 1/ make sure pincnt is right
*/
#if 0
if (dec &&
if (dec) {
if (putref_norefile(b)) {
/* last reference to a dblock with no page
- * requires special handling
+ * requires special handling
* The first block on a page must be freed,
* the other blocks hold a reference on that
* first block which must be dropped.
free_me = 1;
put_page(db->page);
}
- }
+ }
}
}
dec = 0;
credits++;
lafs_space_return(fs, credits);
if (b->inode->i_ino == 0 && b->fileaddr == 0)
- printk("E Dropped credit for %s\n", strblk(b));
+ printk("E Dropped credit for %s\n", strblk(b));
}
if (test_bit(B_Index, &b->flags) &&
!test_bit(B_Pinned, &b->flags)) {
}
//printk("X\n");
}
-
-
+
+
/*
* create (if it doesn't already exist) the 'iblock' for an inode.
 * This is a shadow of the dblock but comes into its own if/when
goto retry;
}
-
static u64
leaf_lookup(void *bf, int len, u32 startaddr, u32 target, u32 *nextp)
{
return ERR_PTR(err);
}
-
static int table_find_first(struct uninc *tbl, u32 min, u32 *addr);
static int __lafs_find_next(struct inode *ino, loff_t *addrp)
/*
* find the first data block at or after *bnump, and
* store the address in *bnump.
- * Return 0 if nothing found, -1 on error, or
+ * Return 0 if nothing found, -1 on error, or
* 1 if *bnump is a valid data block address
*
* The block could be in the indexing tree, or in
BUG();
}
else if (p->b.inode->i_ino == 0 && p->b.fileaddr == 0)
- printk("F Dropped credit for %s\n", strblk(&p->b));
+ printk("F Dropped credit for %s\n", strblk(&p->b));
}
if (!test_and_set_bit(B_UnincCredit, &p->b.flags))
if (!test_and_clear_bit(B_ICredit, &p->b.flags))
inodefile = LAFSI(sb->s_root->d_inode)->filesys;
else {
/* FIXME always use this branch?
- * FIXME is this OK for all sub-filesystems?
+ * FIXME is this OK for all sub-filesystems?
*/
struct fs *fs = sb->s_fs_info;
inodefile = fs->ss[0].root;
if (err)
goto err;
-
err = lafs_import_inode(ino, b);
if (err) {
printk("lafs_import_inode failed %d\n", err);
return lafs_iget(sb, inum, async);
}
-
int __must_check
lafs_import_inode(struct inode *ino, struct datablock *b)
{
li->depth = lai->depth;
dprintk("inode %lu type is %d\n", (unsigned long)ino->i_ino, li->type);
- BUG_ON(li->type == 0x6b6b6b6b); // use after free
+ BUG_ON(li->type == 0x6b6b6b6b); // use after free
ino->i_mapping->a_ops = &lafs_file_aops;
li->trunc_next = 0;
return 0;
}
-
/* Here is the guts of 'truncate'. We find the next leaf index
 * block and discard all the addresses therein.
*/
if (lafs_cluster_update_pin(&uh)==0) {
if (test_and_clear_bit(B_Dirty, &b->b.flags))
lafs_space_return(fs, 1);
- LAFSI(ino)->update_cluster =
+ LAFSI(ino)->update_cluster =
lafs_cluster_update_commit
(&uh, b, LAFS_INODE_LOG_START,
LAFS_INODE_LOG_SIZE);
}
if (wait) {
-
+
if (LAFSI(ino)->update_cluster)
lafs_cluster_wait(fs, LAFSI(ino)->update_cluster);
else {
}
unmap_dblock(db, lai);
lafs_dirty_dblock(db);
-}
+}
/*-----------------------------------------------------------------------
* Inode allocate map handling.
* This means we cannot clear it straight away, so two different threads
* might see the same inode number as being available. We have two
* approaches to guard against this.
- * Firstly we have a 'current' pointer into the inodemap file and
+ * Firstly we have a 'current' pointer into the inodemap file and
* increase that past the inode we return. This discourages multiple
* hits but as the pointer would need to be rewound occasionally it
* isn't a guarantee. The guarantee against multiple allocations is done
*/
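/* Stand-alone user-space model of the "advisory cursor plus authoritative
 * claim" idea sketched above.  The sentence above is cut short, so the use
 * of an atomic exchange as the real guarantee here is an assumption about
 * the general pattern, not a description of the LaFS inodemap mechanism.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define SKETCH_NINUM 1024

static atomic_bool sketch_used[SKETCH_NINUM];
static unsigned int sketch_cursor;      /* advisory: only discourages collisions */

static int sketch_claim_inum(unsigned int *inump)
{
        unsigned int n;

        for (n = 0; n < SKETCH_NINUM; n++) {
                unsigned int i = (sketch_cursor + n) % SKETCH_NINUM;

                /* the exchange, not the cursor, prevents double allocation */
                if (!atomic_exchange(&sketch_used[i], true)) {
                        sketch_cursor = i + 1;  /* move past what we returned */
                        *inump = i;
                        return 0;
                }
        }
        return -1;                      /* nothing free */
}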
static int
-choose_free_inum(struct fs *fs, struct inode *filesys, u32 *inump,
+choose_free_inum(struct fs *fs, struct inode *filesys, u32 *inump,
struct datablock **bp, int *restarted)
{
struct inode *im = lafs_iget(filesys->i_sb, 1, 0);
if (err)
goto abort;
if (b->b.physaddr == 0 && !test_bit(B_Valid, &b->b.flags)) {
- LAFSI(im)->md.inodemap.nextbit =
+ LAFSI(im)->md.inodemap.nextbit =
(im->i_sb->s_blocksize<<3) + 1;
goto retry;
}
find_first_bit(buf, blksize*8) == blksize*8)
/* block is empty, punch a hole */
hole = 1;
-
+
unmap_dblock(imni->mb, buf);
lafs_dirty_dblock(imni->ib);
if (hole)
}
bdev = fs->devs[dev].sb->s_bdev;
- return lafs_sync_page_io(bdev, sect, 0,
+ return lafs_sync_page_io(bdev, sect, 0,
blocks << fs->prime_sb->s_blocksize_bits,
p, 0) ? 0 : -EIO;
}
-
static void
bi_async_complete(struct bio *bio, int error)
{
struct async_complete *ac = bio->bi_private;
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
- ac->state = 3;
+ ac->state = 3;
else
ac->state = 4;
bio_put(bio);
bdev = fs->devs[dev].sb->s_bdev;
ac->state = 2; /* loading */
ac->fs = fs;
- async_page_io(bdev, sect, 0,
+ async_page_io(bdev, sect, 0,
blocks << fs->prime_sb->s_blocksize_bits,
p, 0, ac);
return -EAGAIN;
return -EAGAIN;
}
-
static void
block_loaded(struct bio *bio, int error)
{
bio->bi_sector = sect;
bio_add_page(bio, page, fs->prime_sb->s_blocksize, offset);
-
bio->bi_private = b;
bio->bi_end_io = unlock ? block_loaded : block_loaded_nounlock;
submit_bio(READ, bio);
void lafs_iolock_block(struct block *b);
void lafs_iounlock_block(struct block *b, int bit);
-
void lafs_super_write(struct fs *fs, int dev, u64 addr, char *buf, int size);
int lafs_super_wait(struct fs *fs);
}
}
-
static inline void
virttophys(struct fs *fs, u64 virt, int *devp, sector_t *sectp)
{
kunmap_atomic(buf - dblock_offset(b), KM_USER1);
}
-
static inline void *map_iblock(struct indexblock *b)
{
if (test_bit(B_InoIdx, &b->b.flags))
static inline void decode_time(struct timespec *ts, u64 te)
{
/* low 35 bits are seconds (800 years)
- * high 29 bits are 2nanoseconds
+ * high 29 bits are in units of 2 nanoseconds
*/
ts->tv_sec = te& (0x7FFFFFFFFULL);
ts->tv_nsec = (te>>34) & ~(long)1;
tn <<= 34;
return t | tn;
}
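/* Stand-alone check of the packing used by decode_time() above: the low
 * 35 bits carry seconds and the higher bits count 2-nanosecond units, so
 * an odd nanosecond value loses its bottom bit.  This models the layout
 * for illustration; it is not the kernel helpers themselves.
 */
#include <stdint.h>
#include <assert.h>

static uint64_t sketch_pack_time(uint64_t sec, uint64_t nsec)
{
        uint64_t t = sec & 0x7FFFFFFFFULL;      /* low 35 bits: seconds */
        uint64_t tn = nsec & ~1ULL;             /* keep 2ns resolution */

        return t | (tn << 34);
}

static void sketch_unpack_time(uint64_t te, uint64_t *sec, uint64_t *nsec)
{
        *sec = te & 0x7FFFFFFFFULL;
        *nsec = (te >> 34) & ~1ULL;
}

static void sketch_time_check(void)
{
        uint64_t s, n;

        sketch_unpack_time(sketch_pack_time(1234567890ULL, 999999998ULL), &s, &n);
        assert(s == 1234567890ULL && n == 999999998ULL);
}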
-
static inline struct fs *fs_from_inode(struct inode *ino)
{
static inline int set_phase(struct block *b, int ph)
{
if (b->inode->i_ino == 0 && b->fileaddr == 0)
- printk("SETPHASE %d/%d %d\n", (int)b->inode->i_ino,
+ printk("SETPHASE %d/%d %d\n", (int)b->inode->i_ino,
(int)b->fileaddr, ph);
if (ph)
set_bit(B_Phase1, &b->flags);
return 0;
}
-
/*
* blocks (data and index) are reference counted.
* 'getref' increments the reference count, and could remove the
int __must_check lafs_setparent(struct datablock *blk);
-
/*
* extract little-endian values out of memory.
 * Each macro is given a char * cursor and moves it forwards
*/
#define decode16(p) ({ unsigned int _a; _a= (unsigned char)*(p++); \
- _a + (((unsigned char)*p++)<<8); })
+ _a + (((unsigned char)*p++)<<8); })
#define decode32(p) ({ u32 _b; _b = decode16(p); _b + ((u32)decode16(p)<<16); })
#define decode48(p) ({ u64 _c; _c = decode32(p); _c + ((u64)decode16(p)<<32); })
-
#define encode16(p,n) ({ *(p++) = (n)&255; *(p++) = ((n)>>8) & 255; })
#define encode32(p,n) ({ encode16(p,n); encode16(p, ((n)>>16)); })
#define encode48(p,n) ({ encode32(p,n); encode16(p, ((n)>>32)); })
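/* Usage sketch: each decode advances its cursor argument, so successive
 * calls walk along a packed little-endian record.  The two-field layout
 * here is invented purely for illustration.
 */
static inline void sketch_decode_record(char *p, u32 *fileaddr, u64 *devaddr)
{
        *fileaddr = decode32(p);        /* consumes 4 bytes, p moves on */
        *devaddr = decode48(p);         /* consumes the following 6 bytes */
}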
return space;
}
-
struct dir_ent {
char *name;
int nlen;
int reserved;
};
-
int lafs_dir_blk_size(char *block, int psz);
void lafs_dir_clearparent(struct datablock *b);
struct block *lafs_dir_drop_parent(struct datablock *b);
u64 lafs_timestamp(void);
void lafs_destroy_inode(struct inode *inode);
-
/* checkpoint.c */
void lafs_checkpoint_lock(struct fs *fs);
void lafs_checkpoint_unlock(struct fs *fs);
void lafs_orphan_drop(struct fs *fs, struct datablock *b);
unsigned long lafs_run_orphans(struct fs *fs);
-
/* Segment.c */
int lafs_prealloc(struct block *b, int type);
int lafs_seg_ref(struct fs *fs, u64 addr, int ssnum);
int lafs_space_alloc(struct fs *fs, int credits, int why);
unsigned long lafs_scan_seg(struct fs *fs);
-
/* Cleaner */
int lafs_start_cleaner(struct fs *fs);
void lafs_wake_cleaner(struct fs *fs);
void lafs_quota_flush(struct fs *fs);
int lafs_quota_allocate(struct fs *fs, struct inode *ino, int diff);
-
-#define __wait_event_lock(wq, condition, lock) \
+#define __wait_event_lock(wq, condition, lock) \
do { \
wait_queue_t __wait; \
init_waitqueue_entry(&__wait, current); \
remove_wait_queue(&wq, &__wait); \
} while (0)
-#define wait_event_lock(wq, condition, lock) \
+#define wait_event_lock(wq, condition, lock) \
do { \
- if (condition) \
+ if (condition) \
break; \
__wait_event_lock(wq, condition, lock); \
} while (0)
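/* Usage sketch (hypothetical caller): the macro is entered with 'lock'
 * held and re-tests 'condition' under it.  The body of __wait_event_lock
 * is not shown above, so the assumption that the lock is dropped around
 * the actual sleep and retaken afterwards follows the usual
 * wait_event_lock_irq() pattern rather than anything visible here.
 *
 *      spin_lock(&fs->lock);
 *      wait_event_lock(fs->phase_wait, fs->cleaner.active == 0, fs->lock);
 *      ... condition now true, fs->lock still held ...
 *      spin_unlock(&fs->lock);
 */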
u32 maxsnapshot;
u16 nextyouth;
u16 pad0;
-
+
u64 checkpointcluster; /* (array block) */
u64 root_inodes[0]; /* (array block) */
} __attribute__((packed));
u64 next_addr; /* (Array block) */
u64 this_addr; /* (array block) */
u64 prev_addr; /* (array block) */
- struct group_head groups[0];
+ struct group_head groups[0];
} __attribute__((packed));
#define CH_Checkpoint 1
#define IBLK_INDIRECT (1)
#define IBLK_EXTENT (2)
-
#define MaxDirHash 0x7fffffffUL
struct dirpiece {
u32 target; /* inode number */
* 0 if back (next[0]) is longer
* 1 if fore (next[1]) is longer
* 'chain_info' is
- * 0,1: add that number to the hash of filename
+ * 0,1: add that number to the hash of filename
* 2 : add one trailing byte to hash
* 3 : add 4 trailing bytes (little-endian) to hash
*/
* in the block_offset
*/
#define DIROP_LINK 0
-#define DIROP_UNLINK 1
+#define DIROP_UNLINK 1
#define DIROP_REN_SOURCE 2
#define DIROP_REN_NEW_TARGET 3
-#define DIROP_REN_OLD_TARGET 4
-
-
+#define DIROP_REN_OLD_TARGET 4
/*
* The orphan file has a very simple structure with
#include "lafs.h"
-
-
struct inode_operations lafs_link_ino_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
* It has an 'uninc_table' which is a short list of addresses to
* be incorporated.
*
- * Best option is that all the new addresses fit and we don't need to
+ * Best option is that all the new addresses fit and we don't need to
* split or even change between indirect and extent.
 * But sometimes format changing and splitting are needed.
*
* To handle format changing we generally create a new table and
- * copy all data into it. We maintain some fast paths for the
+ * copy all data into it. We maintain some fast paths for the
* really simple updates such as indirect when all new blocks fit in the
* range, or index where all new ranges come at the end, and fit.
*
* with new addresses, a possibly empty old block, and a possibly empty
* list of addresses that didn't fit into the new block.
*
- * If the old block and the list of addresses are empty, we simply
+ * If the old block and the list of addresses are empty, we simply
* swizzle the block pointers and are done. If not, we have to split the
* index block and we need to inform the parent. If there is no parent
- * to inform (we are an inode) then we have to grow the depth of the
+ * to inform (we are an inode) then we have to grow the depth of the
* index tree.
*
* Awkward details:
* - If an index block becomes empty, we can mark it a Hole.
* - If the inode block has only one index, maybe we should
* shrink the tree.
- *
+ *
* Memory allocation issues.
- * We are potentially in the write path for freeing memory when
+ * We are potentially in the write path for freeing memory when
* performing incorporation so we have to be careful about memory
* allocations.
* Sometimes we need to allocate a new page for a page split. It seems
* new pages early enough... We will have to worry about that later.
*/
-
static int addr_cmp(const void *av, const void *bv)
{
const struct addr *a=av;
*/
while (b[0] || b[1]) {
if (b[next] == NULL ||
- (b[1-next] != NULL &&
+ (b[1-next] != NULL &&
!((prev <= b[1-next]->fileaddr)
^(b[1-next]->fileaddr <= b[next]->fileaddr)
^(b[next]->fileaddr <= prev)))
if (repcnt == icnt) {
/* all currently incorporated addresses are
* replaced or removed, so just install the
- * new addresses
+ * new addresses
*/
int credits = 0;
b = buf;
static void grow_index_tree(struct indexblock *ib, struct indexblock *new)
{
- /* leaf_incorporate or internal_incorporate was working on an inode
+ /* leaf_incorporate or internal_incorporate was working on an inode
* and didn't find enough space.
* So demote 'ib' to a regular Index block and make 'new' a new
* InoIdx block (inode->iblock);
if (phys == ~0LL) {
return 0;
}
-
+
if (li->size < 10)
return 0;
return len;
}
-
struct leafinfo {
u32 firstaddr;
u32 nextaddr;
* Entries are 10 bytes: 6 byte dev address, 4 byte file address.
*/
char *buf = *bufp;
-
+
handle(data, addr, 0); /* initialise */
while (len >= 10 || uninc != NULL) {
elen = decode16(buf);
eaddr = decode32(buf);
len -= 12;
- BUG_ON(ephys == 0 && elen != 0); // FIXME fail gracefully
+ BUG_ON(ephys == 0 && elen != 0); // FIXME fail gracefully
} else
eaddr = 0xFFFFFFFFUL;
}
}
}
-
static int do_incorporate_leaf(struct fs *fs, struct indexblock *ib,
struct uninc *ui,
struct indexblock *new)
* data buffers for new and ib need to be swapped
*/
- /* There is nowhere that we can safely put any index info
+ /* There is nowhere that we can safely put any index info
* that is still in 'ui' except into the new block with the
* remains of the 'ib' addresses.
* It had better fit. And it will.
if (ui->pending_addr[uinum].fileaddr < next) {
int cnt = ui->pending_addr[uinum].cnt
- (next - ui->pending_addr[uinum].fileaddr);
- ui->pending_addr[uinxt].physaddr = ui->pending_addr[uinum].physaddr
+ ui->pending_addr[uinxt].physaddr = ui->pending_addr[uinum].physaddr
+ (next - ui->pending_addr[uinum].fileaddr);
ui->pending_addr[uinxt].fileaddr = next;
ui->pending_addr[uinxt].cnt = cnt;
case 3: /* Need to grow */
/* new needs a B_Credit and a B_ICredit.
*/
-
+
uit.credits -= 2;
set_bit(B_Credit, &new->b.flags);
set_bit(B_ICredit, &new->b.flags);
lafs_incorporate(fs, ib);
}
-
/***************************************************************
* Space pre-allocation
* We need to make sure that the block and all parents
}
#endif
-
int lafs_orphan_prepare(struct fs *fs, struct orphan_info *oi)
{
struct orphan_md *om = &LAFSI(fs->orphans)->md.orphan;
u32 bnum;
struct datablock *ob;
- if (test_bit(B_Orphan, &b->b.flags))
+ if (test_bit(B_Orphan, &b->b.flags))
/* FIXME I need to make sure it stays an orphan... */
return 0;
mutex_lock_nested(&fs->orphans->i_mutex, I_MUTEX_QUOTA);
om->nextfree++;
om->reserved--;
-
or = map_dblock(ob);
ent = slot - (bnum << (fs->prime_sb->s_blocksize_bits-4));
or[ent].type = cpu_to_le32(n);
struct orphan_md *om = &LAFSI(fs->orphans)->md.orphan;
mutex_lock_nested(&fs->orphans->i_mutex, I_MUTEX_QUOTA);
om->reserved--;
- bnum = (om->nextfree + om->reserved) >>
+ bnum = (om->nextfree + om->reserved) >>
(fs->prime_sb->s_blocksize_bits-4);
b = lafs_get_block(fs->orphans, bnum, NULL, GFP_KERNEL);
/* Note that we now own two references to this block, one
}
}
-
/*
* When any processing of an orphan makes it not an orphan any more
* (e.g. link is created for a file, directory block is cleaned)
else bbl = NULL;
putdref(bi);
dprintk("O bfs=%p(%p) bi=%p bbl=%p lastent=%d fs=%d in=%d a=%d\n",
- bfs, bfs->my_inode, bi, bbl, lastent,
+ bfs, bfs->my_inode, bi, bbl, lastent,
le32_to_cpu(last.filesys),
le32_to_cpu(last.inum),
le32_to_cpu(last.addr)
om->nextfree--;
om->reserved++;
clear_bit(B_Orphan, &b->b.flags);
-
+
/* Now drop the reservation we just synthesised */
om->reserved--;
bnum = (om->nextfree + om->reserved) >> (b->b.inode->i_blkbits-4);
#include "lafs.h"
-
void lafs_qcommit(struct fs *fs, struct inode *ino, int diff, int phase)
{
}
#include "lafs.h"
-
#if 0
static int find_super(struct fs *fs, int ssnum, u64 *addr)
{
return -ENOMEM;
}
- dprintk("Super-cluster for %d is at %llu\n",
+ dprintk("Super-cluster for %d is at %llu\n",
ssnum, (unsigned long long)fs->ss[ssnum].checkpointcluster);
err = lafs_load_page(fs, p, fs->ss[ssnum].checkpointcluster, 1);
}
#endif
-
static int
roll_valid(struct fs *fs, struct cluster_head *ch, unsigned long long addr)
{
return 1;
}
-
/*
* roll_locate returns 0 if proper endpoints were found,
* or -EINVAL?? if CheckpointStart and CheckpointEnd weren't found properly
* "next" will contain the address of the next cluster to be written to,
- * "last" the cluster before that, and "seq" the seq number for next cluster
+ * "last" the cluster before that, and "seq" the seq number for next cluster
*/
static int
roll_locate(struct fs *fs, u64 start,
*
*
*/
-
+
last = prev;
start = this;
prev2 = prev;
//printk("Looks good\n");
/* FIXME check checksum, and possibly VerifySum */
/* this head looks valid, so we can possibly verify previous
- * clusters
+ * clusters
*/
if (le16_to_cpu(ch->Hlength) > max)
max = le16_to_cpu(ch->Hlength);
struct lafs_inode *li;
int err = 0;
-
if (flg)
return 0; /* "old" blocks aren't interesting */
if (type == DescIndex)
int len = le16_to_cpu(mb->length) - DescMiniOffset;
err = roll_mini(fs, fsnum, inum, trunc, flg,
bnum, offset, len, (char*)(mb+1));
-
+
mb++;
- mb = (struct miniblock *)(((char*)mb) + ROUND_UP(len));
+ mb = (struct miniblock *)(((char*)mb) + ROUND_UP(len));
desc = (struct descriptor *)mb;
}
j++;
putdref(b);
dprintk("name is %s\n", LAFSI(root)->md.fs.name);
-
unlock_new_inode(root);
/* FIXME lots of error checking */
*
* Normally when ->physaddr changes, the counts in the relevant
* segusage blocks are updated directly. However when the change is
- * for a write in the next phase (during a checkpoint) that is not
+ * for a write in the next phase (during a checkpoint) that is not
* possible as the segusage blocks need to be written with the old value.
* So we track these updates in a simple data structure that
* records delta against each ss+dev+seg. If such a data structure
}
}
-
/* lafs_seg_flush_all
* All segment usage tables should be flushed to storage.
* This is called towards the end of performing a checkpoint, after
write_inode_now(fs->devs[d].segsum, 1);
}
-
static void seg_apply(struct fs *fs, struct segsum *ss)
{
void *buf;
void lafs_seg_apply_all(struct fs *fs)
{
int i;
-
+
for (i=0 ; i<SHASHSIZE ; i++) {
struct hlist_head *head = &fs->stable[i];
struct segsum *ss;
return err;
}
-
/*************************************************************
* Space management: allocate, use, free
*/
* - free: this entry is for a free segment
* - cleanable: this entry is for a cleanable segment.
* - clean: segment has been cleaned but is not yet free (awaiting checkpoint).
- * These are singly linked lists (via 'next'). We record head and tail.
+ * These are singly linked lists (via 'next'). We record head and tail.
* The end of this list has a pointer to 0xFFFF, not 0.
* We usually remove entries from the head, but when the table is
* full, we might walk part way down a list and discard all the rest.
u16 skip[0]; /* or larger... */
};
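/* Stand-alone model of the list handling described above: entries live in
 * a table and are chained through u16 'next' indices with 0xFFFF marking
 * the end; head and tail are recorded and removal is normally from the
 * head.  Names are illustrative, not the segtracker fields.
 */
#include <stdint.h>

#define SKETCH_LIST_END 0xFFFF

struct sketch_ent {
        uint16_t next;
        /* ... payload such as dev/segment/usage ... */
};

struct sketch_list {
        uint16_t first, last;
        int cnt;
};

/* Remove the head entry and return its index, or -1 if the list is empty. */
static int sketch_pop_head(struct sketch_ent *tbl, struct sketch_list *l)
{
        uint16_t head = l->first;

        if (head == SKETCH_LIST_END)
                return -1;
        l->first = tbl[head].next;
        if (l->first == SKETCH_LIST_END)
                l->last = SKETCH_LIST_END;      /* list became empty */
        l->cnt--;
        return head;
}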
-
static inline struct segstat *segfollow(struct segtracker *st, u16 link)
{
void *a;
if (cnt != st->cleanable.cnt) { printk("%d != %d\n", cnt, st->cleanable.cnt); WARN_ON(1); return 1;}
if (st->cleanable.last != prev) { printk("L%d != %d\n", prev, st->cleanable.last); WARN_ON(1); return 1;}
-
return 0;
}
return h;
}
-
static void seginsert(struct segtracker *st, u16 ssn, u16 *where[SEG_MAX_HEIGHT])
{
/* We looked for 'ss' but didn't find it. 'where' is the result of looking.
}
}
}
-
void lafs_free_get(struct fs *fs, unsigned int *dev, u32 *seg, int nonlogged)
{
ss = segfollow(fs->segtrack, fs->segtrack->free.first);
BUG_ON(!ss);
-
+
*dev = ss->dev;
*seg = ss->segment;
spin_unlock(&fs->lock);
return;
}
- if (fs->segtrack->free.cnt + fs->segtrack->clean.cnt >=
+ if (fs->segtrack->free.cnt + fs->segtrack->clean.cnt >=
fs->segtrack->total / 2) {
/* Have enough free/clean entries already */
spin_unlock(&fs->lock);
ss->next = fs->segtrack->clean.first;
fs->segtrack->clean.first = where[0][0];
if (fs->segtrack->clean.last == 0xFFFF)
- fs->segtrack->clean.last =
+ fs->segtrack->clean.last =
fs->segtrack->clean.first;
fs->segtrack->clean.cnt++;
printk("a");
return;
}
- if (fs->segtrack->free.cnt + fs->segtrack->clean.cnt >=
+ if (fs->segtrack->free.cnt + fs->segtrack->clean.cnt >=
fs->segtrack->total / 2) {
/* Have enough free/clean entries already */
spin_unlock(&fs->lock);
ss->next = fs->segtrack->clean.first;
fs->segtrack->clean.first = ssn;
if (fs->segtrack->clean.last == 0xFFFF)
- fs->segtrack->clean.last =
+ fs->segtrack->clean.last =
fs->segtrack->clean.first;
fs->segtrack->clean.cnt ++;
ss->dev = dev;
printk("============= Cleanable table (%d) =================\n",
st->cleanable.cnt);
printk("pos: dev/seg usage score\n");
-
+
i = 0;
for (ssn = st->cleanable.first; ssn != 0xffff; ssn = ss->next) {
ss = segfollow(st, ssn);
u[i] = d[i];
}
-
unsigned long lafs_scan_seg(struct fs *fs)
{
/* Process one block of youth or segment-usage
int segcount;
int blks;
- while (fs->scan.free_block >
+ while (fs->scan.free_block >
(fs->devs[fs->scan.free_dev].segment_count
>> (fs->prime_sb->s_blocksize_bits - 1))) {
fs->scan.free_dev++;
char *d;
db = lafs_get_block(fs->devs[fs->scan.free_dev].segsum,
- fs->scan.free_block +
+ fs->scan.free_block +
fs->devs[fs->scan.free_dev].tablesize,
NULL, GFP_KERNEL);
if (!db) {
#include <linux/namei.h>
#include <linux/crc32.h>
-
/*
* Mounting a snapshot is very different from mounting a new
* filesystem.
#define QHASHSIZE (1<<QHASHBITS)
#define QHASHMASK (QHASHSIZE-1)
-
/* skip points are used to accelerate sequential-order insert
* in the list of blocks pending write
*/
struct skippoint *next[SKIP_MAX_HEIGHT];
};
-
#define WC_NUM 3 /* 3 active write-clusters: new, clean, and defrag */
struct fs {
struct lafs_state *state;
int phase_locked;
int phase; /* 0 or 1 */
wait_queue_head_t phase_wait; /* Also use to wait for first_free_pass */
-
+
/* flags to set on next cluster. */
int checkpointing;
int rolled; /* set when rollforward has completed */
#define CleanerRunning 1
#define CleanerNeeded 2
- struct work_struct done_work; /* used for handling
+ struct work_struct done_work; /* used for handling
* refile after write completes */
-
struct {
int active; /* number of actively cleaned segments */
u32 cleaning; /* amount of space that is being cleaned
} segtrack[1];
/*
- * NOTE: there should probably be a free list for each 'level'
+ * NOTE: there should probably be a free list for each 'level'
*/
/* For scan */
struct wc {
/* A 'write-cluster' descriptor
- * Any write-cluster that we are in the process of
+ * Any write-cluster that we are in the process of
* building has this structure to describe it.
* We gather dirty blocks and keep track of cluster-header usage
*/
struct cluster_head *chead; /* the cluster head == page */
int chead_blocks; /* blocks allocated for cluster head */
int cluster_space; /* space remaining in cluster_head
- * after current commitments
+ * after current commitments
*/
int chead_size; /* space used already in cluster_head
* new miniblock and allocation
struct hlist_head stable[SHASHSIZE];
spinlock_t stable_lock;
-
};
static inline int test_phase_locked(struct fs *fs)
/* There are two sorts of blocks, data blocks and index blocks.
* Data blocks store the contents of files, including inode files.
* So each inode is in a datablock.
- * Index blocks store index/indirect/extent blocks. As inodes often
+ * Index blocks store index/indirect/extent blocks. As inodes often
 * contain index information, an inode will usually have an index block as well.
*
* Data blocks are stored in an address space and are indexed by inode and offset.
- * Index blocks are indexed by inode, depth, and offset. The "depth" is
+ * Index blocks are indexed by inode, depth, and offset. The "depth" is
* distance from data, so indirect and extent blocks have depth of 1.
* This index doesn't often change.
*
- * All blocks have a pointer to their parent, which is an index block (except
+ * All blocks have a pointer to their parent, which is an index block (except
* for inodes?).
* All index blocks have a linked list of children which is used to find children
* to move when the block is split.
u64 physaddr;
struct indexblock *parent;
- struct list_head siblings; /* Next unreachable block in the same
+ struct list_head siblings; /* Next unreachable block in the same
* reachability-set as this block
*/
-
+
struct list_head lru; /* phase_leafs, clean_leafs,
* clhead, pending_blocks */
* in storage (i.e. in another snapshot)
*/
- struct block *chain; /* on list of unincorporated changes */
+ struct block *chain; /* on list of unincorporated changes */
};
struct datablock {
};
};
struct indexblock {
- struct block b;
+ struct block b;
char * data;
- struct hlist_node hash;
+ struct hlist_node hash;
int depth;
struct list_head children;
- /*
+ /*
* pincnt[p] is the number of pinned blocks in phase 'p' which
* have us as their ->parent.
*/
*/
struct block *uninc_next;
};
-
+
#define iblk(__bl) container_of(__bl, struct indexblock, b)
#define dblk(__bl) container_of(__bl, struct datablock, b)
* cleaning purposes and so should be written to the
* cleaner segment.
*/
-#define B_Valid b(6) /* block contains valid data */
+#define B_Valid b(6) /* block contains valid data */
#define B_PinPending b(7) /* set on data blocks while checkpoint_locked if we might
* want to mark them dirty
*/
lai->md.fs.pblocks_used += diff;
else
lai->md.fs.cblocks_used += diff;
-
+
if (!is_index) {
if (diff > 0)
lai->md.fs.ablocks_used --;
static struct super_operations lafs_sops;
-
u64 lafs_timestamp(void)
{
u64 stamp;
stamp = (stamp << 32) | ts.tv_sec;
return stamp;
}
-
/*---------------------------------------------------------------------
* Write out state and super blocks
return 0;
}
-
static int
valid_devblock(struct lafs_dev *db, sector_t addr)
{
{
/* Find the devblock and the stateblock for this device
-
 * Only do basic internal consistency checks. Inter-device
* checks happen later
*/
}
fs->checkpointcluster = le64_to_cpu(st->checkpointcluster);
for (i=0; i<fs->maxsnapshot; i++) {
- fs->ss[i].root_addr =
+ fs->ss[i].root_addr =
le64_to_cpu(st->root_inodes[i]);
dprintk("root inode %d are %llu\n",
i, fs->ss[i].root_addr);
for (i=0; i < 16 / 4 ; i ++)
fsid ^= le32_to_cpu(fsuuid[i]);
-
spin_lock(&root->vfs_inode.i_lock);
buf->f_type = 0x4C614654; /* "LaFS" */
buf->f_bsize = de->d_inode->i_sb->s_blocksize;
return strlen(buffer);
}
-
module_param_call(dump,do_dump,get_dump,0, 0775);
#endif