}
}
+/* Put @b on the appropriate per-fs LRU list if it currently looks like
+ * a leaf block that is either on no list, or only on the free list.
+ * Lock ordering: lafs_hash_lock (free-list removal) is taken and
+ * dropped before fs->lock (leaf-list insertion) - they are never held
+ * together here.
+ */
+static void set_lru(struct block *b)
+{
+ struct fs *fs;
+ int ph = !!test_bit(B_Phase1, &b->flags);
+
+ /* Already on a real lru list (and not merely the free list):
+ * nothing to do.  list_empty_careful is used because we test
+ * without holding the list's lock; a false result is re-checked
+ * under fs->lock below.
+ */
+ if (!test_bit(B_OnFree, &b->flags) && !list_empty_careful(&b->lru))
+ return;
+ /* IO-locked blocks and non-leaves are not eligible for the leaf
+ * lists.
+ */
+ if (test_bit(B_IOLock, &b->flags) || !lafs_is_leaf(b, ph))
+ return;
+
+ /* This is close enough to a leaf that we should put it on a list.
+ * If we raced and it isn't then it will be found and removed
+ */
+ if (test_bit(B_OnFree, &b->flags)) {
+ /* Remove from the free list under lafs_hash_lock;
+ * test_and_clear guards against a racing remover.
+ */
+ spin_lock(&lafs_hash_lock);
+ if (test_and_clear_bit(B_OnFree, &b->flags))
+ list_del_init(&b->lru);
+ spin_unlock(&lafs_hash_lock);
+ }
+ fs = fs_from_inode(b->inode);
+ spin_lock(&fs->lock);
+ if (list_empty(&b->lru)) {
+ /* Recompute the phase under fs->lock - it may have
+ * changed since the unlocked check above.
+ */
+ ph = !!test_bit(B_Phase1, &b->flags);
+ if (test_bit(B_Realloc, &b->flags))
+ list_add(&b->lru, &fs->clean_leafs);
+ else
+ list_add(&b->lru, &fs->phase_leafs[ph]);
+
+ /* Hold a 'leaf' reference while the block is on a list.
+ * NOTE(review): the code this replaces took
+ * getiref_locked(iblk(b), ...) for B_Index blocks but
+ * plain getref here covers both cases - confirm the
+ * refcounting is equivalent for index blocks.
+ */
+ getref(b, MKREF(leaf));
+ }
+ spin_unlock(&fs->lock);
+ /* allow lafs_iolock_block-empty to complete */
+ lafs_io_wake(b);
+}
+
void lafs_refile(struct block *b, int dec)
{
struct block *next = NULL, *next_parent = NULL;
struct inode *checkpin = NULL;
struct inode *myi = NULL;
+ set_lru(b);
+
spin_lock(&lafs_hash_lock);
if (!list_empty(&b->lru) &&
lafs_checkpoint_unlock(fs);
}
- /* Make sure lru is correct */
- if ((list_empty(&b->lru) || test_bit(B_OnFree, &b->flags)) &&
- !test_bit(B_IOLock, &b->flags) &&
- lafs_is_leaf(b, ph)) {
- if (test_and_clear_bit(B_OnFree, &b->flags)) {
- /* lock hashlock - but we have that */
- list_del_init(&b->lru);
- /* unlock */
- }
- spin_lock(&fs->lock);
- if (list_empty(&b->lru)) {
- if (test_bit(B_Realloc, &b->flags))
- list_add(&b->lru, &fs->clean_leafs);
- else
- list_add(&b->lru, &fs->phase_leafs[ph]);
-
- if (test_bit(B_Index, &b->flags))
- getiref_locked(iblk(b), MKREF(leaf));
- else
- getref(b, MKREF(leaf));
- }
- spin_unlock(&fs->lock);
- lafs_io_wake(b);
- }
/* check the ->parent link */
if (atomic_read(&b->refcnt) == dec) {
if (b->parent &&