int last;
if (inum)
- inode = lafs_iget(dir->i_sb, inum, SYNC);
+ inode = lafs_iget(LAFSI(dir)->filesys, inum, SYNC);
if (IS_ERR(inode))
return PTR_ERR(inode);
if (!inode && dirop != DIROP_REN_TARGET)
if (err < 0)
ERR_PTR(err);
- ino = lafs_iget(dir->i_sb, inum, SYNC);
+ ino = lafs_iget(LAFSI(dir)->filesys, inum, SYNC);
if (IS_ERR(ino))
return ERR_PTR(PTR_ERR(ino));
* is slightly non-trivial.
* iget*_locked will normally wait for any inode with one
* of the flags I_FREEING I_CLEAR I_WILL_FREE I_NEW
- * to either be unhashed or has the flag cleared.
+ * to either be unhashed or have the flag cleared.
* We cannot afford that wait in the cleaner as we could deadlock.
* So we use iget5_locked and provide a test function that fails
* if it finds the inode with any of those flags set.
- * If it does see the inode like that it clear the inum
- * that is passed in (by reference) so that it knows to continue
+ * If it does see the inode like that it sets a flag in the 'ikey'
+ * that is passed in by reference so that it knows to continue
* failing (for consistency) and so that the 'set' function
* we provide can know to fail the 'set'.
* The result of this is that if iget finds an inode it would
- * have to wait on, the inum is cleared and NULL is returned.
+ * have to wait on, a flag is set and NULL is returned.
* An unfortunate side effect is that an inode will be allocated
* and then destroyed to no avail.
* This is avoided by calling ilookup5 first. This also allows
* us to only allocate/load the data block if there really seems
* to be a need.
*/
-#define NO_INO (~(ino_t)0)
+/* Lookup key passed (by reference) to the iget5/ilookup5 test and set
+ * callbacks: identifies an inode by number within a particular filesys
+ * inode, and records whether a matching inode was seen in a busy
+ * (freeing/new) state so that later probes keep failing consistently. */
+struct ikey {
+ ino_t inum; /* inode number to look up */
+ struct inode *fsys; /* filesys inode the target belongs to */
+ bool was_busy; /* set by async_itest on a freeing/new match */
+};
+
+/* iget5 'test' callback for synchronous lookups: an inode matches when
+ * both its number and its containing filesys agree with the key. */
+static int sync_itest(struct inode *inode, void *data)
+{
+ struct ikey *ik = data;
+
+ return (inode->i_ino == ik->inum &&
+ LAFSI(inode)->filesys == ik->fsys);
+}
+
static int async_itest(struct inode *inode, void *data)
{
- ino_t *inump = data;
- ino_t inum = *inump;
+ struct ikey *ik = data;
- if (inum == NO_INO)
+ if (ik->was_busy)
 /* found and is freeing */
 return 0;
- if (inode->i_ino != inum)
+ /* Delegate the inum/filesys identity match to the sync test. */
+ if (!sync_itest(inode, data))
 return 0;
 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) {
- *inump = NO_INO;
+ /* Record the busy inode so this and later probes keep
+ * failing, and so iset() knows to refuse. */
+ ik->was_busy = true;
 return 0;
 }
 return 1;
}
-static int async_iset(struct inode *inode, void *data)
+/* iget5_locked 'set' callback: initialise a freshly allocated inode from
+ * the key, or refuse with -EBUSY when the test callback already found a
+ * busy (freeing/new) twin of this inode. */
+static int iset(struct inode *inode, void *data)
{
- ino_t *inump = data;
- if (!*inump)
+ struct ikey *ik = data;
+ if (ik->was_busy)
 return -EBUSY;
- inode->i_ino = *inump;
+ inode->i_ino = ik->inum;
+ LAFSI(inode)->filesys = ik->fsys;
 return 0;
}
struct inode *
-lafs_iget(struct super_block *sb, ino_t inum, int async)
+lafs_iget(struct inode *fsys, ino_t inum, int async)
{
/* find, and load if needed, this inum */
struct inode *ino = NULL;
struct inode *oldino;
struct datablock *b = NULL;
- struct inode *inodefile;
- struct sb_key *k;
+ struct ikey ik = { .inum = inum, .fsys = fsys, };
int err = 0;
-
- BUG_ON(inum == NO_INO);
-
- k = sb->s_fs_info;
- inodefile = k->root;
+ struct super_block *sb = fsys->i_private;
if (async) {
/* We cannot afford to block on 'freeing_inode'
* alloc/free if the inode is locked in some way.
*/
while (!ino) {
- ino_t inum2 = inum;
+
err = 0;
- ino = ilookup5(sb, inum, async_itest, &inum2);
+ ino = ilookup5(sb, inum, async_itest, &ik);
if (ino)
break;
- if (inum2 == NO_INO)
+ if (ik.was_busy)
err = -EAGAIN;
/* For async we will always want the dblock loaded,
* to fail -EAGAIN once we have an I_NEW inode.
*/
if (!b)
- b = lafs_get_block(inodefile, inum, NULL,
+ b = lafs_get_block(fsys, inum, NULL,
GFP_NOFS, MKREF(iget));
if (!b)
return ERR_PTR(-ENOMEM);
if (!err) {
/* Have the block, so safe to iget */
- inum2 = inum;
ino = iget5_locked(sb, inum,
- async_itest, async_iset,
- &inum2);
+ async_itest, iset,
+ &ik);
if (!ino) {
- if (inum2 == NO_INO)
+ if (ik.was_busy)
err = -EAGAIN;
else
err = -ENOMEM;
}
}
} else
- ino = iget_locked(sb, inum);
+ ino = iget5_locked(sb, inum, sync_itest, iset, &ik);
if (!ino) {
putdref(b, MKREF(iget));
return ERR_PTR(-ENOENT);
}
- LAFSI(ino)->filesys = igrab(inodefile);
+ /* NOTE(review): igrab() can return NULL if the filesys inode is
+ * being freed, and this runs on every lookup including cache hits
+ * (the ilookup5 'break' path above) — confirm the extra filesys
+ * reference is balanced when the inode was already hashed. */
+ igrab(LAFSI(ino)->filesys);
/* surprisingly the inode bdi does not default to the
* super_blocks bdi...
/* Need to load block 'inum' from an inode file...
*/
if (!b) {
- b = lafs_get_block(inodefile, inum, NULL, GFP_KERNEL, MKREF(iget));
+ b = lafs_get_block(fsys, inum, NULL, GFP_KERNEL, MKREF(iget));
if (!b)
err = -ENOMEM;
else
out:
if (b && test_and_clear_bit(B_Async, &b->b.flags)) {
putdref(b, MKREF(async));
- lafs_wake_thread(fs_from_sb(sb));
+ lafs_wake_thread(fs_from_inode(fsys));
}
putdref(b, MKREF(iget));
return ino;
struct inode *filesys;
struct super_block *sb2;
- filesys = lafs_iget(sb, fsnum, async);
+ filesys = lafs_iget(fs->ss[0].root, fsnum, async);
if (IS_ERR(filesys))
return filesys;
if (LAFSI(filesys)->type != TypeInodeFile) {
iput(filesys);
return ERR_PTR(PTR_ERR(sb2));
}
- rv = lafs_iget(sb2, inum, async);
+ rv = lafs_iget(filesys, inum, async);
if (IS_ERR(rv))
deactivate_locked_super(sb2);
else
up_write(&sb2->s_umount);
- } else {
- rv = lafs_iget(sb, inum, async);
+ } else if (inum) {
+ rv = lafs_iget(fs->ss[0].root, inum, async);
if (!IS_ERR(rv))
atomic_inc(&sb->s_active);
+ } else {
+ /* inum 0 names the subset root itself. igrab() returns
+ * NULL if that inode is already being freed, so only bump
+ * s_active on success and never hand a bare NULL back to
+ * callers that expect an inode or an ERR_PTR. */
+ rv = igrab(fs->ss[0].root);
+ if (rv)
+ atomic_inc(&sb->s_active);
+ else
+ rv = ERR_PTR(-ESTALE);
}
return rv;
}
choose_free_inum(struct fs *fs, struct super_block *sb, u32 *inump,
struct datablock **bp, int *restarted)
{
- struct inode *im = lafs_iget(sb, 1, SYNC);
+ struct inode *im = lafs_iget(ino_from_sb(sb), 1, SYNC);
loff_t bnum;
struct datablock *b;
char *buf;
lafs_iounlock_block(&b->b);
inode_map_new_commit(&imni);
- ino = lafs_iget(sb, b->b.fileaddr, SYNC);
+ ino = lafs_iget(ino_from_sb(sb), b->b.fileaddr, SYNC);
if (IS_ERR(ino)) {
lafs_cluster_update_abort(&ui);
LAFS_BUG(1, &b->b);
static int inode_map_free(struct fs *fs, struct super_block *sb, u32 inum)
{
- struct inode *im = lafs_iget(sb, 1, SYNC);
+ struct inode *im = lafs_iget(ino_from_sb(sb), 1, SYNC);
int bit;
unsigned long *buf;
struct datablock *b;
/* This is used during roll-forward to register a newly created
* inode in the inode map
*/
- struct inode *im = lafs_iget(sb, 1, SYNC);
+ struct inode *im = lafs_iget(ino_from_sb(sb), 1, SYNC);
int bit;
unsigned long *buf;
struct datablock *b;
/* inode.c */
void lafs_add_atime_offset(struct timespec *atime, int offset);
int __must_check lafs_mount(struct fs *fs);
-struct inode *lafs_iget(struct super_block *fs, ino_t inum, int async);
+struct inode *lafs_iget(struct inode *filesys, ino_t inum, int async);
struct inode *lafs_iget_fs(struct fs *fs, int fsnum, int inum, int async);
int __must_check lafs_import_inode(struct inode *ino, struct datablock *b);
void lafs_inode_checkpin(struct inode *ino);
lafs_mount(struct fs *fs)
{
struct datablock *b = NULL;
- struct inode *ino;
+ struct inode *rootino;
struct inode *rootdir;
+ struct inode *aino, *oino;
struct dentry *de;
int err;
int d;
int orphan_count;
fs->rolled = 0;
- fs->ss[0].root = ino = iget_locked(fs->prime_sb, 0);
- LAFSI(ino)->filesys = ino;
- k->root = ino;
+ fs->ss[0].root = rootino = iget_locked(fs->prime_sb, 0);
+ k->root = rootino;
err = -ENOMEM;
- if (!ino)
+ if (!rootino)
goto err;
+ /* Dereference rootino only after the NULL check above:
+ * iget_locked() returns NULL on allocation failure. */
+ LAFSI(rootino)->filesys = rootino;
+ rootino->i_private = fs->prime_sb;
- b = lafs_get_block(ino, 0, NULL, GFP_KERNEL, MKREF(mount));
+ b = lafs_get_block(rootino, 0, NULL, GFP_KERNEL, MKREF(mount));
if (!b)
goto err;
set_bit(B_Root, &b->b.flags);
if (err)
goto err;
- err = lafs_import_inode(ino, b);
+ err = lafs_import_inode(rootino, b);
if (err)
goto err;
putdref(b, MKREF(mount));
b = NULL;
- unlock_new_inode(ino);
+ unlock_new_inode(rootino);
- rootdir = lafs_iget(fs->prime_sb, 2, SYNC);
+ rootdir = lafs_iget(rootino, 2, SYNC);
err = PTR_ERR(rootdir);
if (IS_ERR(rootdir))
goto err;
}
fs->prime_sb->s_root = de;
- ino = lafs_iget(fs->prime_sb, 8, SYNC);
- err = PTR_ERR(ino);
- if (IS_ERR(ino))
+ oino = lafs_iget(rootino, 8, SYNC);
+ err = PTR_ERR(oino);
+ if (IS_ERR(oino))
goto err;
- if (LAFSI(ino)->type != TypeOrphanList) {
- iput(ino);
+ if (LAFSI(oino)->type != TypeOrphanList) {
+ iput(oino);
err = -EINVAL;
goto err;
}
- fs->orphans = ino;
+ fs->orphans = oino;
for (d = 0; d < fs->devices ; d++) {
- ino = lafs_iget(fs->prime_sb,
- fs->devs[d].usage_inum,
- SYNC);
- err = PTR_ERR(ino);
- if (IS_ERR(ino))
+ struct inode *sino = lafs_iget(rootino,
+ fs->devs[d].usage_inum,
+ SYNC);
+ err = PTR_ERR(sino);
+ if (IS_ERR(sino))
goto err;
- if (LAFSI(ino)->type != TypeSegmentMap) {
- iput(ino);
+ if (LAFSI(sino)->type != TypeSegmentMap) {
+ iput(sino);
err = -EINVAL;
goto err;
}
- fs->devs[d].segsum = ino;
+ fs->devs[d].segsum = sino;
}
orphan_count = lafs_count_orphans(fs->orphans);
LAFSI(fs->orphans)->md.orphan.nextfree = orphan_count;
INIT_LIST_HEAD(&fs->cleaner.seg[d].cleaning);
}
- ino = lafs_iget(fs->prime_sb, 3, SYNC);
- if (!IS_ERR(ino)) {
- if (LAFSI(ino)->type != TypeAccessTime) {
- iput(ino);
+ aino = lafs_iget(rootino, 3, SYNC);
+ if (!IS_ERR(aino)) {
+ if (LAFSI(aino)->type != TypeAccessTime) {
+ iput(aino);
err = -EINVAL;
} else
- LAFSI(fs->ss[0].root)->md.fs.accesstime = ino;
- } else if (PTR_ERR(ino) != -ENOENT)
- err = PTR_ERR(ino);
+ LAFSI(fs->ss[0].root)->md.fs.accesstime = aino;
+ } else if (PTR_ERR(aino) != -ENOENT)
+ err = PTR_ERR(aino);
err:
putdref(b, MKREF(mount));
/* already existed */
kfree(sk);
} else {
- struct inode *rootdir;
+ struct inode *rootino, *rootdir;
struct datablock *b;
sb->s_flags = flags | MS_RDONLY;
atomic_inc(&fs->prime_sb->s_active);
- fs->ss[s].root = sk->k.root = iget_locked(sb, 0);
- LAFSI(fs->ss[s].root)->filesys = fs->ss[s].root;
- b = lafs_get_block(fs->ss[s].root, 0, NULL, GFP_KERNEL,
+ rootino = iget_locked(sb, 0);
+ /* iget_locked() returns NULL on allocation failure; bail out
+ * before dereferencing it, mirroring the import-failure path
+ * used a few lines below. */
+ if (!rootino) {
+ deactivate_locked_super(sb);
+ goto fail;
+ }
+ rootino->i_private = sb;
+ fs->ss[s].root = sk->k.root = rootino;
+ LAFSI(rootino)->filesys = rootino;
+ b = lafs_get_block(rootino, 0, NULL, GFP_KERNEL,
MKREF(snap));
+ /* NOTE(review): b is used below without a NULL check — confirm
+ * lafs_get_block() cannot fail here or add handling. */
b->b.physaddr = fs->ss[s].root_addr;
set_bit(B_PhysValid, &b->b.flags);
if (!err)
err = lafs_import_inode(fs->ss[s].root, b);
putdref(b, MKREF(snap));
- unlock_new_inode(fs->ss[s].root);
+ unlock_new_inode(rootino);
if (err) {
deactivate_locked_super(sb);
goto fail;
}
- rootdir = lafs_iget(sb, 2, SYNC);
+ rootdir = lafs_iget(rootino, 2, SYNC);
sb->s_root = d_alloc_root(rootdir);
sb->s_op = fs->prime_sb->s_op;
sb->s_flags |= MS_ACTIVE;
struct inode *rootdir, *imapfile;
int err = 0;
+ ino->i_private = sb;
+
igrab(ino);
sb->s_blocksize = fs->blocksize;
sb->s_blocksize_bits = fs->blocksize_bits;
sb->s_op = &lafs_sops;
sb->s_export_op = &lafs_export_ops;
sb->s_time_gran = 2;
- rootdir = lafs_iget(sb, 2, SYNC);
+ rootdir = lafs_iget(ino, 2, SYNC);
if (IS_ERR(rootdir) && PTR_ERR(rootdir) == -ENOENT) {
rootdir = lafs_new_inode(fs, sb, NULL,
TypeDir, 2, 0755, NULL);
err = PTR_ERR(rootdir);
else {
sb->s_root = d_alloc_root(rootdir);
- imapfile = lafs_iget(sb, 1, SYNC);
+ imapfile = lafs_iget(ino, 1, SYNC);
if (IS_ERR(imapfile) && PTR_ERR(imapfile) == -ENOENT)
imapfile = lafs_new_inode(fs, sb, NULL,
TypeInodeMap, 1, 0, NULL);
}
if (!err) {
- struct inode *atime = lafs_iget(sb, 3, SYNC);
+ struct inode *atime = lafs_iget(ino, 3, SYNC);
if (!IS_ERR(atime)) {
if (LAFSI(atime)->type != TypeAccessTime) {
iput(atime);
{
struct inode *inode;
- inode = lafs_iget(sb, ino, SYNC);
+ inode = lafs_iget(ino_from_sb(sb), ino, SYNC);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (generation && inode->i_generation != generation) {
static struct dentry *lafs_get_parent(struct dentry *child)
{
ino_t inum = LAFSI(child->d_inode)->md.file.parent;
- struct inode *inode = lafs_iget(child->d_inode->i_sb, inum, SYNC);
+ struct inode *inode = lafs_iget(LAFSI(child->d_inode)->filesys,
+ inum, SYNC);
if (IS_ERR(inode))
return ERR_CAST(inode);
return d_obtain_alias(inode);