lafs_dirty_dblock(b);
putdref(b, MKREF(inode_map_free));
lafs_checkpoint_unlock(fs);
+ mutex_unlock(&im->i_mutex);
+ iput(im);
+ return 0;
+}
+
+int lafs_inode_inuse(struct fs *fs, struct super_block *sb, u32 inum)
+{
+ /* This is used during roll-forward to register a newly created
+ * inode in the inode map.
+ * The inode map (inode 1 of @sb) holds one bit per inode;
+ * a set bit means "free" (new blocks are filled with 0xff below),
+ * so marking @inum in-use means clearing its bit.
+ * Returns 0 on success or a negative errno.
+ */
+ struct inode *im = lafs_iget(sb, 1, SYNC);
+ int bit;
+ unsigned long *buf;
+ struct datablock *b;
+ u32 bnum;
+ int err;
+
+ /* Nested class needed: the caller may already hold another
+ * inode's i_mutex during roll-forward.
+ */
+ mutex_lock_nested(&im->i_mutex, I_MUTEX_QUOTA);
+
+ /* Each map block holds 8 * blocksize bits, so the block number
+ * is inum / (blocksize * 8) and 'bit' is the remainder.
+ */
+ bnum = inum >> (3 + sb->s_blocksize_bits);
+ bit = inum - (bnum << (3 + sb->s_blocksize_bits));
+ if (bnum > LAFSI(im)->md.inodemap.size) {
+ /* inum too unbelievably big — more than one block past the
+ * current end of the map cannot be right.
+ */
+ mutex_unlock(&im->i_mutex);
+ iput(im);
+ return -EINVAL;
+ }
+ b = lafs_get_block(im, bnum, NULL, GFP_KERNEL, MKREF(inode_map_free));
+ if (!b) {
+ mutex_unlock(&im->i_mutex);
+ iput(im);
+ return -ENOMEM;
+ }
+
+ err = lafs_read_block(b);
+ if (err) {
+ putdref(b, MKREF(inode_map_free));
 mutex_unlock(&im->i_mutex);
+ iput(im);
+ return err;
+ }
+
+ /* Flag the block as having a pending pin before taking the
+ * checkpoint lock, under the block's io lock.
+ */
+ lafs_iolock_written(&b->b);
+ set_bit(B_PinPending, &b->b.flags);
+ lafs_iounlock_block(&b->b);
+retry:
+ lafs_checkpoint_lock(fs);
+ err = lafs_pin_dblock(b, CleanSpace);
+ if (err == -EAGAIN) {
+ /* A checkpoint is needed before we can pin; wait for it
+ * to complete, then try again.
+ */
+ lafs_checkpoint_unlock_wait(fs);
+ goto retry;
+ }
+ BUG_ON(err < 0);
+ buf = map_dblock(b);
+ if (bnum == LAFSI(im)->md.inodemap.size) {
+ /* need to add a new block to the file: all-ones means
+ * every inode in it starts out free.
+ */
+ memset(buf, 0xff, fs->blocksize);
+ LAFSI(im)->md.inodemap.size = bnum + 1;
+ lafs_dirty_inode(im);
+ }
+ /* Clear the bit: inum is now recorded as in-use. */
+ generic___clear_le_bit(bit, buf);
+ unmap_dblock(b, buf);
+ lafs_dirty_dblock(b);
+ putdref(b, MKREF(inode_map_free));
+ lafs_checkpoint_unlock(fs);
+ mutex_unlock(&im->i_mutex);
 iput(im);
 return 0;
 }
+
+
int lafs_setattr(struct dentry *dentry, struct iattr *attr)
{
int err;
struct datablock *lafs_inode_dblock(struct inode *ino, int async, REFARG);
struct datablock *lafs_inode_get_dblock(struct inode *ino, REFARG);
int lafs_inode_handle_orphan(struct datablock *b);
+int lafs_inode_inuse(struct fs *fs, struct super_block *sb, u32 inum);
static inline void lafs_iput_fs(struct inode *ino)
{
u32 bnum, int offset, int len, char *data)
{
struct inode *inode;
+ struct inode *fsinode;
struct lafs_inode *li;
struct datablock *db = NULL;
int err = 0;
break;
}
- lafs_iput_fs(inode);
+ fsinode = inode;
inode = lafs_iget_fs(fs, inum, bnum, SYNC);
if (IS_ERR(inode)) {
+ struct super_block *sb;
err = PTR_ERR(inode);
- if (err != -ENOENT || offset != 0)
+ if (err != -ENOENT || offset != 0) {
+ lafs_iput_fs(fsinode);
return err;
+ }
- /* FIXME creating new inode */
- BUG();
+ db = lafs_get_block(fsinode, bnum, NULL, GFP_KERNEL,
+ MKREF(roll));
+ sb = lafs_get_subset_sb(fsinode);
+ lafs_inode_inuse(fs, sb, bnum);
+ deactivate_super(sb);
+ lafs_iput_fs(fsinode);
+ if (!db)
+ db = ERR_PTR(-ENOMEM);
+ } else {
+ lafs_iput_fs(fsinode);
+ db = lafs_inode_dblock(inode, SYNC, MKREF(roll));
+ if (!IS_ERR(db))
+ /* Make sure block is in-sync with inode */
+ lafs_inode_fillblock(inode);
}
- db = lafs_inode_dblock(inode, SYNC, MKREF(roll));
if (IS_ERR(db)) {
err = PTR_ERR(db);
break;
* need that during roll-forward */
set_bit(B_PinPending, &db->b.flags);
lafs_pin_dblock(db, CleanSpace);
- /* Make sure block is in-sync with inode */
- lafs_inode_fillblock(inode);
buf = map_dblock(db);
memcpy(buf+offset, data, len);
unmap_dblock(db, buf);
- err = lafs_import_inode(inode, db);
+ if (inode)
+ err = lafs_import_inode(inode, db);
+ else {
+ inode = lafs_iget_fs(fs, inum, bnum, SYNC);
+
+ }
lafs_dirty_dblock(db);
break;