]> git.neil.brown.name Git - LaFS.git/commitdiff
Avoid quadratic searches in segment table.
authorNeilBrown <neilb@suse.de>
Fri, 4 Mar 2011 01:47:30 +0000 (12:47 +1100)
committerNeilBrown <neilb@suse.de>
Fri, 4 Mar 2011 01:47:30 +0000 (12:47 +1100)
There are a couple of times that we need to iterate over the segment
table (stable) and need to drop the lock each time.
Rather than always restarting after we find something of interest,
keep track of when we make a change to the table (thus possibly
breaking an iterator) and only restart when that change is seen.

Both the iterators are completely serialised so resetting
stable_changed when we retry cannot confuse a different iterator.

Signed-off-by: NeilBrown <neilb@suse.de>
segments.c
state.h

index eb4b8bfda6186bdd1098a4304a112d9352cabd51..63f7df85db09d451f0bfe4b36966ae7885ff2f80 100644 (file)
@@ -111,6 +111,8 @@ static void ss_put(struct segsum *ss, struct fs *fs)
                else {
                        if (!hlist_unhashed(&ss->hash))
                                hlist_del(&ss->hash);
+                       if (!fs->stable_changed)
+                               fs->stable_changed = 1;
                        spin_unlock(&fs->stable_lock);
                        putdref(ss->ssblk, MKREF(ss));
                        putdref(ss->youthblk, MKREF(ssyouth));
@@ -233,11 +235,11 @@ void lafs_seg_put_all(struct fs *fs)
                if (!hlist_empty(&fs->stable[i])) {
                        struct segsum *ss;
                        struct datablock *b, *y;
-                       struct hlist_node *n;
-                       /* FIXME this is quadratic */
-               retry:
+                       struct hlist_node *n, *pos;
+
                        spin_lock(&fs->stable_lock);
-                       hlist_for_each_entry(ss, n, &fs->stable[i], hash) {
+               retry:
+                       hlist_for_each_entry_safe(ss, pos, n, &fs->stable[i], hash) {
                                b = ss->ssblk;
                                y = ss->youthblk;
                                if (!b && !y)
@@ -251,7 +253,9 @@ void lafs_seg_put_all(struct fs *fs)
                                        putdref(y, MKREF(async));
                                putdref(b, MKREF(ss));
                                putdref(y, MKREF(ssyouth));
-                               goto retry;
+                               spin_lock(&fs->stable_lock);
+                               if (fs->stable_changed)
+                                       goto retry;
                        }
                        spin_unlock(&fs->stable_lock);
                }
@@ -481,14 +485,18 @@ void lafs_seg_apply_all(struct fs *fs)
                struct segsum *ss;
                struct hlist_node *n, *pos;
                spin_lock(&fs->stable_lock);
+       retry:
+               fs->stable_changed = 0;
                hlist_for_each_entry_safe(ss, pos, n, head, hash) {
+                       if (atomic_read(&ss->delayed) == 0)
+                               continue;
                        atomic_inc(&ss->refcnt);
                        spin_unlock(&fs->stable_lock);
                        seg_apply(fs, ss);
                        ss_put(ss, fs);
                        spin_lock(&fs->stable_lock);
-                       // FIXME this still isn't safe - 'n' could
-                       // disappear while unlocked.
+                       if (fs->stable_changed)
+                               goto retry;
                }
                spin_unlock(&fs->stable_lock);
        }
diff --git a/state.h b/state.h
index 0808018a0b7a03c852e80a08e154fb5578e0d014..0d373d827ff32104de70ae3e35f084eb262f5ed5 100644 (file)
--- a/state.h
+++ b/state.h
@@ -333,6 +333,8 @@ struct fs {
 
        struct hlist_head stable[SHASHSIZE];
        spinlock_t stable_lock;
+       int stable_changed; /* Flag so we can see if the table changed while
+                            * we dropped a lock */
 
        struct rename_roll {
                struct rename_roll *next;