4 * Copyright (C) 2005-2009
5 * Neil Brown <neilb@suse.de>
6 * Released under the GPL, version 2
9 #include <linux/blkdev.h>
11 #include <linux/highmem.h>
/* Debug tracing: dprintk() prints only while the global lafs_trace flag
 * is set at runtime. */
16 extern int lafs_trace;
17 #define dprintk(x...) do { if(lafs_trace)printk(x); }while(0)
/* LAFS_BUG(): like BUG_ON(), but first prints a description of the
 * offending block (via strblk) with file/line for post-mortem debugging. */
19 #define LAFS_BUG(cond, b) do { if (cond) { printk(KERN_ERR "%s:%d: %s\n", __FILE__,__LINE__,strblk(b));BUG();}}while(0)
/* Globals exported for debugger/dump use.
 * NOTE(review): interior lines of 'struct freelists' are elided in this
 * extract; the declaration is incomplete here. */
22 extern struct fs *dfs;
23 extern struct freelists {
25 unsigned long freecnt;
27 extern void lafs_dump_tree(void);
/*
 * Block-reference debugging.
 * When enabled, every get/put of a block reference is logged with a
 * textual name plus __FILE__/__LINE__ via lafs_add_ref()/lafs_del_ref();
 * REFARG threads the reference name through the getter functions.
 * NOTE(review): the #if/#else selecting between the two arms is elided
 * in this extract, so both the debug and the no-op variants appear below
 * in sequence - confirm against the full header.
 */
31 #define REFARG char *__refname
33 #define MKREF(name) #name
/* Debug arm: forward all arguments, including the REFARG, to the
 * tracking helpers / underscore-prefixed implementations. */
35 #define add_ref(a,b,c,d) lafs_add_ref(a,b,c,d)
36 #define del_ref(a,b,c,d) lafs_del_ref(a,b,c,d)
37 #define has_ref(a,b) lafs_has_ref(a,b)
39 #define lafs_get_block(a,b,c,d,e) _lafs_get_block(a,b,c,d,e)
40 #define first_in_seg(a,b,c,d,e) _first_in_seg(a,b,c,d,e)
41 #define dir_lookup_blk(a,b,c,d,e,f,g) _dir_lookup_blk(a,b,c,d,e,f,g)
42 #define lafs_iblock_alloc(a,b,c,d) _lafs_iblock_alloc(a,b,c,d)
43 #define ihash_lookup(a,b,c,d,e) _ihash_lookup(a,b,c,d,e)
44 #define iblock_get(a,b,c,d,e) _iblock_get(a,b,c,d,e)
45 #define lafs_make_iblock(a,b,c,d) _lafs_make_iblock(a,b,c,d)
46 #define lafs_leaf_find(a,b,c,d,e,f) _lafs_leaf_find(a,b,c,d,e,f)
47 #define lafs_inode_dblock(a,b,c) _lafs_inode_dblock(a,b,c)
48 #define lafs_inode_get_dblock(a,b) _lafs_inode_get_dblock(a,b)
/* Non-debug arm: ref tracking compiles away to nothing; has_ref()
 * reports -1 ("unknown") rather than a real count. */
54 #define add_ref(a,b,c,d) do {} while (0)
55 #define del_ref(a,b,c,d) do {} while (0)
56 #define has_ref(a,b) (-1)
/* ...and the trailing REFARG argument is dropped before calling the
 * underscore-prefixed implementation. */
58 #define lafs_get_block(a,b,c,d,e) _lafs_get_block(a,b,c,d)
59 #define first_in_seg(a,b,c,d,e) _first_in_seg(a,b,c,d)
60 #define dir_lookup_blk(a,b,c,d,e,f,g) _dir_lookup_blk(a,b,c,d,e,f)
61 #define lafs_iblock_alloc(a,b,c,d) _lafs_iblock_alloc(a,b,c)
62 #define ihash_lookup(a,b,c,d,e) _ihash_lookup(a,b,c,d)
63 #define iblock_get(a,b,c,d,e) _iblock_get(a,b,c,d)
64 #define lafs_make_iblock(a,b,c,d) _lafs_make_iblock(a,b,c)
65 #define lafs_leaf_find(a,b,c,d,e,f) _lafs_leaf_find(a,b,c,d,e)
66 #define lafs_inode_dblock(a,b,c) _lafs_inode_dblock(a,b)
67 #define lafs_inode_get_dblock(a,b) _lafs_inode_get_dblock(a)
/* strblk(): format a one-line description of a block for printk. */
69 #define strblk(a) lafs_strblk(a)
72 u32_after(u32 a, u32 b)
/*
 * Per-file-type operation tables, defined in the corresponding .c files.
 * NOTE(review): 'const extern' is legal C but 'extern const' is the
 * conventional ordering - purely stylistic, no behavioral difference.
 */
77 const extern struct inode_operations lafs_file_ino_operations;
78 const extern struct inode_operations lafs_dir_ino_operations;
79 const extern struct inode_operations lafs_subset_ino_operations;
80 const extern struct inode_operations lafs_link_ino_operations;
81 const extern struct inode_operations lafs_special_ino_operations;
82 const extern struct file_operations lafs_file_file_operations;
83 const extern struct file_operations lafs_dir_file_operations;
84 const extern struct file_operations lafs_subset_file_operations;
85 const extern struct address_space_operations lafs_file_aops;
86 const extern struct address_space_operations lafs_index_operations;
/*
 * Block and page I/O.
 * The *_async variants return without blocking (presumably -EAGAIN style
 * when the data is not yet available - confirm against the .c files);
 * __must_check marks calls whose error return must not be ignored.
 */
97 int lafs_sync_page_io(struct block_device *bdev, sector_t sector, int offset,
98 int size, struct page *page, int rw);
99 int lafs_load_page(struct fs *fs, struct page *p, u64 vaddr, int blocks);
100 int lafs_load_page_async(struct fs *fs, struct page *p, u64 vaddr, int blocks,
101 struct async_complete *ac);
102 int __must_check lafs_load_block(struct block *b, struct bio *bio);
103 int __must_check lafs_wait_block(struct block *b);
104 int __must_check lafs_wait_block_async(struct block *b);
105 int __must_check lafs_find_block(struct datablock *b, int adopt);
106 int __must_check lafs_find_block_async(struct datablock *b);
107 int __must_check lafs_read_block(struct datablock *b);
108 int __must_check lafs_read_block_async(struct datablock *b);
109 int __must_check lafs_find_next(struct inode *b, loff_t *bnum);
/* Walk the index tree to the leaf covering 'addr'; REFARG carries the
 * reference-debugging name (see macros above). */
110 struct indexblock *lafs_leaf_find(struct inode *inode, u32 addr,
111 int adopt, u32 *next, int async, REFARG);
112 u32 lafs_leaf_next(struct indexblock *ib, u32 start);
113 int lafs_index_empty(struct indexblock *ib);
/*
 * I/O locking of blocks.
 * The lafs_iolock_* wrappers take the lock via the underscore-prefixed
 * helpers and then record the caller's file/line in the block when
 * debugging is enabled.
 * NOTE(review): the #ifdef separating the two set_iolock_info variants
 * is elided in this extract.
 */
115 #define set_iolock_info(b) ( (b)->iolock_file = __FILE__, (b)->iolock_line = __LINE__)
/* Non-debug stub: evaluates to 0 so it composes in expressions. */
117 #define set_iolock_info(b) (0)
/* The *_async forms return 1 on success (lock taken, info recorded),
 * 0 when the lock could not be taken without blocking. */
119 #define lafs_iolock_block(b) do { _lafs_iolock_block(b); set_iolock_info(b); } while(0)
120 #define lafs_iolock_block_async(b) ( _lafs_iolock_block_async(b) ? ( set_iolock_info(b), 1) : 0)
121 #define lafs_iolock_written(b) do { _lafs_iolock_written(b); set_iolock_info(b); } while(0)
122 #define lafs_iolock_written_async(b) ( _lafs_iolock_written_async(b) ? ( set_iolock_info(b), 1) : 0)
124 void lafs_io_wake(struct block *b);
125 void _lafs_iolock_block(struct block *b);
126 void _lafs_iolock_written(struct block *b);
127 int _lafs_iolock_block_async(struct block *b);
128 int _lafs_iolock_written_async(struct block *b);
130 void lafs_iounlock_block(struct block *b);
131 void lafs_iocheck_block(struct datablock *db, int unlock);
132 void lafs_iocheck_writeback(struct datablock *db, int unlock);
133 void lafs_writeback_done(struct block *b);
/* Superblock I/O. */
135 void lafs_super_write(struct fs *fs, int dev, u64 addr, char *buf, int size);
136 int lafs_super_wait(struct fs *fs);
/*
 * Mounting and inode lifecycle.
 * lafs_iget()/lafs_iget_fs() look up (or instantiate) inodes; the
 * 'async' flag selects non-blocking behaviour as with the block I/O
 * routines above.
 */
139 int __must_check lafs_mount(struct fs *fs);
140 struct inode *lafs_iget(struct super_block *fs, ino_t inum, int async);
141 struct inode *lafs_iget_fs(struct fs *fs, int fsnum, int inum, int async);
142 int __must_check lafs_import_inode(struct inode *ino, struct datablock *b);
143 void lafs_inode_checkpin(struct inode *ino);
144 void lafs_clear_inode(struct inode *ino);
145 void lafs_delete_inode(struct inode *ino);
146 void lafs_dirty_inode(struct inode *ino);
147 int lafs_sync_inode(struct inode *ino, int wait);
148 struct inode *lafs_new_inode(struct fs *fs, struct super_block *sb,
149 struct inode *dir, int type,
150 int inum, int mode, struct datablock **inodbp);
151 int lafs_lock_inode(struct inode *ino);
152 void lafs_inode_fillblock(struct inode *ino);
/* Fetch the data block holding the on-disk inode; REFARG names the
 * reference for debug tracking. */
153 struct datablock *lafs_inode_dblock(struct inode *ino, int async, REFARG);
154 struct datablock *lafs_inode_get_dblock(struct inode *ino, REFARG);
155 int lafs_inode_handle_orphan(struct datablock *b);
157 struct datablock *lafs_get_block(struct inode *ino, unsigned long index,
158 struct page *p, int gfp, REFARG);
/* Real prototypes behind the add_ref/del_ref/has_ref macros (debug builds). */
160 void add_ref(struct block *b, char *ref, char *file, int line);
161 void del_ref(struct block *b, char *ref, char *file, int line);
162 int has_ref(struct block *b, char *ref);
165 int lafs_setattr(struct dentry *dentry, struct iattr *attr);
/* Debug helpers: one-line block description / tree dump. */
167 char *strblk(struct block *b);
168 int lafs_print_tree(struct block *b, int depth);
169 int lafs_dev_find(struct fs *fs, u64 virt);
/*
 * Device geometry: translate between virtual (file-system) addresses,
 * (device, segment, offset) triples, and physical sectors.
 * Two layouts are handled: when a whole segment fits inside one stripe
 * unit (segment_size >= width * stride) the mapping is a plain div/mod;
 * otherwise segments are interleaved across the stripe.
 * NOTE(review): many interior lines (return type, braces, else-arms,
 * variable declarations for v2/strp) are elided in this extract - the
 * bodies below are incomplete fragments.
 */
172 virttoseg(struct fs *fs, u64 virt, int *devp, u32 *segp, u32 *offsetp)
174 int d = lafs_dev_find(fs, virt);
175 struct fs_dev *dv = &fs->devs[d];
178 if (dv->segment_size >= dv->width * dv->stride) {
179 *offsetp = do_div(virt, dv->segment_size);
183 int of = do_div(v2, dv->stride);
186 do_div(v2, dv->width * dv->stride);
189 *segp = (strp * dv->stride + of) /
190 (dv->segment_size / dv->width);
191 *offsetp = virt - dv->segment_stride * *segp;
/* segtovirt(): virtual address of the start of segment 'segnum' on
 * device 'dev' - the inverse of the segment lookup above. */
196 static inline u64 segtovirt(struct fs *fs, int dev, u32 segnum)
198 return fs->devs[dev].start +
199 (u64)fs->devs[dev].segment_stride * segnum;
/* in_seg(): does virtual address 'virt' fall inside segment 'seg' of
 * device 'd'?  (Fragmentary body - see NOTE above.) */
203 in_seg(struct fs *fs, int d, u32 seg, u64 virt)
205 struct fs_dev *dv = &fs->devs[d];
/* Address outside the device entirely cannot be in any of its segments. */
209 if (virt < dv->start ||
210 virt >= dv->start + dv->size)
214 if (dv->segment_size >= dv->width * dv->stride) {
215 do_div(virt, dv->segment_size);
219 int of = do_div(v2, dv->stride);
222 do_div(v2, dv->width * dv->stride);
225 return seg == ((strp * dv->stride + of) /
226 (dv->segment_size / dv->width));
/* virttophys(): virtual block address -> (device, 512-byte sector). */
231 virttophys(struct fs *fs, u64 virt, int *devp, sector_t *sectp)
233 int d = lafs_dev_find(fs, virt);
234 struct fs_dev *dv = &fs->devs[d];
/* Scale blocks to sectors, then add the device's byte offset (>>9). */
240 virt <<= (fs->blocksize_bits - 9);
241 virt += (dv->segment_offset)>>9;
/*
 * In-page offsets and atomic mapping.
 * Several blocks share one struct page; page->private points at the
 * first block structure in an array, so pointer subtraction yields the
 * block's index within the page, and shifting by i_blkbits converts
 * that to a byte offset.
 * NOTE(review): braces/returns elided in this extract throughout.
 */
245 static inline int dblock_offset(struct datablock *b)
247 return (b - (struct datablock*)b->page->private)
248 << b->b.inode->i_blkbits;
252 static inline int iblock_offset(struct indexblock *b)
254 return (b - (struct indexblock*)b->b.page->private)
255 << b->b.inode->i_blkbits;
/* Dispatch on the B_Index flag to the right offset helper. */
258 static inline int block_offset(struct block *b)
260 if (test_bit(B_Index, &b->flags))
261 return iblock_offset(iblk(b));
263 return dblock_offset(dblk(b));
/* map/unmap pairs use kmap_atomic slots: KM_USER0 for the primary
 * mapping, KM_USER1 (_2 variants) when two blocks must be mapped at
 * once.  unmap must receive the same pointer map returned. */
267 static inline void *map_dblock(struct datablock *b)
269 void *a = kmap_atomic(b->page, KM_USER0);
270 a += dblock_offset(b);
274 static inline void unmap_dblock(struct datablock *b, void *buf)
276 kunmap_atomic(buf - dblock_offset(b), KM_USER0);
279 static inline void *map_dblock_2(struct datablock *b)
281 void *a = kmap_atomic(b->page, KM_USER1);
282 a += dblock_offset(b);
286 static inline void unmap_dblock_2(struct datablock *b, void *buf)
288 kunmap_atomic(buf - dblock_offset(b), KM_USER1);
/* Index blocks must be IOLocked before mapping; a B_InoIdx index block
 * shares its bytes with the inode's data block, so map that instead. */
291 static inline void *map_iblock(struct indexblock *b)
293 LAFS_BUG(!test_bit(B_IOLock, &b->b.flags), &b->b);
294 if (test_bit(B_InoIdx, &b->b.flags))
295 return map_dblock(LAFSI(b->b.inode)->dblock);
299 static inline void unmap_iblock(struct indexblock *b, void *buf)
301 if (test_bit(B_InoIdx, &b->b.flags))
302 unmap_dblock(LAFSI(b->b.inode)->dblock, buf);
305 static inline void *map_iblock_2(struct indexblock *b)
307 if (test_bit(B_InoIdx, &b->b.flags))
308 return map_dblock_2(LAFSI(b->b.inode)->dblock);
312 static inline void unmap_iblock_2(struct indexblock *b, void *buf)
314 if (test_bit(B_InoIdx, &b->b.flags))
315 unmap_dblock_2(LAFSI(b->b.inode)->dblock, buf);
/*
 * On-disk timestamp codec: 64 bits = 35 bits of seconds plus a field of
 * 2ns units in the high bits.  The (te>>34) & ~1 trick extracts the
 * high field already multiplied by 2, giving tv_nsec directly.
 * NOTE(review): bodies are fragmentary (elided lines) throughout this
 * section.
 */
318 static inline void decode_time(struct timespec *ts, u64 te)
320 /* low 35 bits are seconds (800 years)
321 * high 29 bits are 2nanoseconds
323 ts->tv_sec = te& (0x7FFFFFFFFULL);
324 ts->tv_nsec = (te>>34) & ~(long)1;
/* encode_time(): inverse of decode_time; seconds truncated to 35 bits,
 * nanoseconds rounded down to a multiple of 2. */
327 static inline u64 encode_time(struct timespec *ts)
331 t &= (0x7FFFFFFFFULL);
332 tn = ts->tv_nsec & ~(long)1;
337 /* s_fs_info points to an allocated sb_key structure */
/* Accessors from the VFS super_block to the LAFS fs/root-inode via the
 * sb_key stored in s_fs_info. */
343 static inline struct fs *fs_from_sb(struct super_block *sb)
345 struct sb_key *k = sb->s_fs_info;
349 static inline struct inode *ino_from_sb(struct super_block *sb)
351 struct sb_key *k = sb->s_fs_info;
355 static inline struct fs *fs_from_inode(struct inode *ino)
357 return fs_from_sb(ino->i_sb);
/* set_phase(): record which checkpoint phase a block belongs to
 * (B_Phase1 set/cleared by 'ph') and pin it under its parent,
 * bumping the parent's per-phase pin count on the 0->1 transition. */
360 static inline int set_phase(struct block *b, int ph)
362 if (b->inode->i_ino == 0 && b->fileaddr == 0)
363 dprintk("SETPHASE %s to %d\n", strblk(b), ph);
365 set_bit(B_Phase1, &b->flags);
367 clear_bit(B_Phase1, &b->flags);
369 /* FIXME do I need to lock access to ->parent */
370 if (!test_and_set_bit(B_Pinned, &b->flags) &&
372 atomic_inc(&b->parent->pincnt[ph]);
/* db->my_inode access: rcu_my_inode() yields an rcu-protected pointer
 * (only meaningful for TypeInodeFile blocks); iget_my_inode() converts
 * that to a counted inode reference. */
379 * db->my_inode is protected by rcu. We can 'get' it and
380 * remain rcu_protected, or 'iget' it and be protected by a
383 static inline struct inode *rcu_my_inode(struct datablock *db)
388 if (LAFSI(db->b.inode)->type != TypeInodeFile)
391 ino = rcu_dereference(db->my_inode);
397 static inline void rcu_iput(struct inode *ino)
403 static inline struct inode *iget_my_inode(struct datablock *db)
405 struct inode *ino = rcu_my_inode(db);
406 struct inode *rv = NULL;
414 * blocks (data and index) are reference counted.
415 * 'getref' increments the reference count, and could remove the
416 * block from any 'lru' list. However to save effort, we simply
417 * treat anything on an lru list which has a non-zero reference
418 * count as invisible.
419 * 'putref' drops the count and calls lafs_refile to see if anything
422 int lafs_is_leaf(struct block *b, int ph);
423 void lafs_refile(struct block *b, int dec);
425 extern struct indexblock *lafs_getiref_locked(struct indexblock *ib);
427 extern spinlock_t lafs_hash_lock;
/* __getref(): raw increment, no sanity checks; _getref() additionally
 * BUGs if asked to revive a block whose count already reached zero.
 * NOTE(review): bodies are fragmentary (elided lines) in this extract. */
429 static inline struct block *__getref(struct block *b)
432 atomic_inc(&b->refcnt);
436 static inline struct block *_getref(struct block *b)
438 LAFS_BUG(b && atomic_read(&b->refcnt) == 0, b);
/* _get*ref_locked* variants assert the specific lock that makes a
 * 0->1 ref transition safe (private_lock, lafs_hash_lock, or fs->lock
 * respectively); B_InoIdx blocks are redirected through
 * lafs_getiref_locked() since they share state with the inode. */
442 static inline struct datablock *_getdref_locked(struct datablock *b)
444 LAFS_BUG(!spin_is_locked(&b->b.inode->i_data.private_lock), &b->b);
449 static inline struct block *_getref_locked(struct block *b)
451 LAFS_BUG(!spin_is_locked(&b->inode->i_data.private_lock), b);
452 if (test_bit(B_InoIdx, &b->flags))
453 return &lafs_getiref_locked(iblk(b))->b;
458 static inline struct indexblock *_getiref_locked_needsync(struct indexblock *b)
460 LAFS_BUG(!spin_is_locked(&lafs_hash_lock), &b->b);
461 if (test_bit(B_InoIdx, &b->b.flags))
462 return lafs_getiref_locked(b);
467 static inline struct block *_getref_locked_needsync(struct block *b)
469 LAFS_BUG(!spin_is_locked(&fs_from_inode(b->inode)->lock), b);
470 if (test_bit(B_InoIdx, &b->flags))
471 return &lafs_getiref_locked(iblk(b))->b;
/* _putref(): drop a counted reference; dropping below zero is a bug. */
476 static inline void _putref(struct block *b)
480 BUG_ON(atomic_read(&b->refcnt)==0);
/* _reflog/_refxlog wrap a get/put helper 'c' with add_ref/del_ref
 * logging ('add' selects which); the _refxlog form is for typed
 * (datablock/indexblock) pointers and reaches the embedded ->b.
 * NOTE(review): the #ifdef between the plain and logging _reflog
 * variants is elided in this extract. */
485 #define _reflog(c,blk,ref,add) (c(blk))
487 #define _reflog(c,blk,ref,add) ({ !blk ? 0 : \
488 add? add_ref(blk,ref,__FILE__,__LINE__) : \
489 del_ref(blk,ref, __FILE__,__LINE__); c(blk); })
490 #define _refxlog(c,blk,ref,add) ({ \
491 add? add_ref(&(blk)->b,ref,__FILE__,__LINE__) : \
492 del_ref(&(blk)->b,ref, __FILE__,__LINE__); c(blk); })
/* Public get/put entry points: 'blk' is the block, 'r' the reference
 * name (from MKREF).  The d/i variants take datablock/indexblock
 * pointers and return the same typed pointer. */
494 #define getref(blk, r) ( ({BUG_ON((blk) && ! atomic_read(&(blk)->refcnt));}), _reflog(_getref, blk,r,1))
495 #define getref_locked(blk, r) _reflog(_getref_locked, blk,r,1)
496 #define getdref_locked(blk, r) _refxlog(_getdref_locked, blk,r,1)
497 #define getref_locked_needsync(blk, r) _reflog(_getref_locked_needsync, blk,r,1)
498 #define getiref_locked_needsync(blk, r) _refxlog(_getiref_locked_needsync, blk,r,1)
499 #define putref(blk, r) _reflog(_putref, blk,r,0)
500 #define getdref(blk, r) ( ({BUG_ON((blk) && ! atomic_read(&(blk)->b.refcnt));}), _reflog(_getref, (&(blk)->b),r,1),blk)
501 #define getiref(blk, r) ( ({BUG_ON((blk) && ! atomic_read(&(blk)->b.refcnt));}), _reflog(_getref, (&(blk)->b),r,1),blk)
502 #define putdref(blk, r) ({ if (blk) _reflog(_putref, (&(blk)->b),r,0);})
503 #define putiref(blk, r) ({ if (blk) _reflog(_putref, (&(blk)->b),r,0);})
505 /* When we get a ref on a block other than under b->inode->i_data.private_lock, we
506 * need to call this to ensure that lafs_refile isn't still handling a refcnt->0 transition.
508 static inline void sync_ref(struct block *b)
/* Lock/unlock with nothing in between: a barrier that waits out any
 * holder of private_lock, i.e. any in-flight lafs_refile cleanup. */
510 spin_lock(&b->inode->i_data.private_lock);
511 spin_unlock(&b->inode->i_data.private_lock);
515 * Notes on locking and block references.
516 * There are several places that can hold uncounted references to blocks.
517 * When we try to convert such a reference to a counted reference we need to be careful.
518 * We must increment the refcount under a lock that protects the particular reference, and
519 * then must call sync_ref above to ensure our new reference is safe.
520 * When the last counted reference on a block is dropped (in lafs_refile) we hold private_lock
521 * while cleaning up and sync_ref assures that cleanup is finished.
522 * Before we can free a block we must synchronise with these other locks and
523 * then ensure that the refcount is still zero.
524 * For index blocks that means taking lafs_hash_lock and ensuring the count is still zero.
525 * For most data blocks we need to remove from the lru list using fs->lock, and check the count is
527 * For inode data blocks we need to follow the inode and break the ->dblock lock under the inodes
528 * own lock - but only if the refcount is still zero.
530 * The list of non-counted references and the locks which protect them is:
532 * index hash table lafs_hash_lock
533 * index freelist lafs_hash_lock
534 * phase_leafs / clean_leafs fs->lock
535 * inode->iblock lafs_hash_lock
536 * inode->dblock inode->i_data.private_lock
537 * inode->free_index lafs_hash_lock
538 * page->private page lock
540 * Lock ordering among these is:
541 * inode->i_data.private_lock
542 * LAFSI(inode)->dblock->inode->i_data.private_lock
548 int __must_check lafs_setparent(struct datablock *blk);
551 * extract little-endian values out of memory.
552 * Each function is given a char*, and moves it forwards
/* decode16/32/48: read 2/4/6 little-endian bytes through 'p',
 * advancing p past them.  The double decode16() in decode32 is
 * intentional - each call consumes the next two bytes.  Note these
 * evaluate 'p' multiple times, so only pass a plain lvalue. */
555 #define decode16(p) ({ unsigned int _a; _a= (unsigned char)*(p++); \
556 _a + (((unsigned char)*p++)<<8); })
557 #define decode32(p) ({ u32 _b; _b = decode16(p); _b + ((u32)decode16(p)<<16); })
558 #define decode48(p) ({ u64 _c; _c = decode32(p); _c + ((u64)decode16(p)<<32); })
/* encode16/32/48: inverse - write 'n' little-endian through 'p'. */
560 #define encode16(p,n) ({ *(p++) = (n)&255; *(p++) = ((n)>>8) & 255; })
561 #define encode32(p,n) ({ encode16(p,n); encode16(p, ((n)>>16)); })
562 #define encode48(p,n) ({ encode32(p,n); encode16(p, ((n)>>32)); })
/* NOTE(review): recent kernels define DIV_ROUND_UP in kernel.h -
 * this local copy may warrant an #ifndef guard; confirm target tree. */
564 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
/* space_needed(): directory-piece accounting - bytes needed for a name
 * of 'len' plus chain-offset encoding, rounded up to 1<<psz units.
 * NOTE(review): declaration/return lines elided in this extract. */
566 static inline int space_needed(int len, int chainoffset, int psz)
569 space = len + (chainoffset > 255 ? 4 : chainoffset > 1 ? 1 : 0);
570 space += offsetof(struct dirpiece, name);
571 space = DIV_ROUND_UP(space, 1<<psz);
/* Handle threaded through the multi-step cluster-update protocol
 * (prepare / pin / commit / abort - see prototypes further down).
 * NOTE(review): member lines elided in this extract. */
582 struct update_handle {
/*
 * Directory-block manipulation.  All of these operate on a raw block
 * buffer ('char *block') with piece-size 'psz'; names are located via
 * a seeded hash (lafs_hash_name).
 */
587 void lafs_dir_print(char *buf, int psz);
588 int lafs_dir_blk_size(char *block, int psz);
589 void lafs_dir_clearparent(struct datablock *b);
590 struct block *lafs_dir_drop_parent(struct datablock *b);
592 int lafs_hash_name(u32 seed, int len, const char *name);
593 int lafs_dir_find(char *block, int psz, u32 seed, u32 hash,
595 int lafs_dir_findfirst(char *block, int psz);
596 void lafs_dir_init_block(char *block, int psz, const char *name, int len,
597 u32 target, int type, int chainoffset);
598 #define DT_TEST 128 /* for lafs_dir_add_ent, this flags to test for space,
599 * but not do any actual add
601 #define DT_INTERNAL 129 /* Flags that this is an internal dir block */
602 int lafs_dir_add_ent(char *block, int psz, const char *name, int len,
603 u32 target, int type, u32 seed, u32 hash, int chainoffset);
604 int lafs_dir_del_ent(char *block, int psz, u32 seed, u32 hash);
605 int lafs_dir_next_ent(char *block, int psz, int *pp, char *name, u32 *target,
607 void lafs_dir_repack(char *block, int psz, char *new, u32 seed, int merge);
608 void dir_get_prefix(char *b1, char *b2, int psz, char *prefix);
609 void lafs_dir_split(char *orig, int psz, char *new1, char *new2, const char *name,
610 u32 target, int type, u32 *newhash, u32 seed, u32 hash,
612 /*void dir_settarget(char *block, int psz, int piece, u32 target);*/
613 struct dir_ent *lafs_dir_extract(char *block, int psz, struct dir_ent *de,
614 int pnum, u32 *hash);
615 void lafs_dir_set_target(char *block, int psz, struct dir_ent *de, int pnum);
616 int lafs_dir_empty(char *block);
617 void lafs_dir_make_index(char *orig, char *new, int psz, u32 target);
619 int lafs_dir_handle_orphan(struct datablock *db);
/* Address-space hooks for page release/invalidation. */
621 int lafs_release_page(struct page *page, gfp_t gfp_flags);
622 void lafs_invalidate_page(struct page *page, unsigned long offset);
/* Two filesystem types: the normal one and a read-only snapshot view. */
624 extern struct file_system_type lafs_fs_type;
625 extern struct file_system_type lafs_snap_fs_type;
/* Index-block hash table and allocation. */
627 int lafs_ihash_init(void);
628 void lafs_ihash_free(void);
629 void lafs_release_index(struct list_head *head);
630 struct indexblock *lafs_iblock_alloc(struct fs *fs, int gfp, int with_buffer,
632 void lafs_iblock_free(struct indexblock *ib);
633 void lafs_hash_iblock(struct indexblock *ib);
634 void lafs_unhash_iblock(struct indexblock *ib);
/* Space/quota accounting and index-tree incorporation. */
636 void lafs_summary_update(struct fs *fs, struct inode *ino,
637 u64 oldphys, u64 newphys, int is_index, int phase,
639 int lafs_summary_allocate(struct fs *fs, struct inode *ino, int diff);
640 void lafs_qcommit(struct fs *fs, struct inode *ino, int diff, int phase);
641 void lafs_incorporate(struct fs *fs, struct indexblock *ib);
642 void lafs_walk_leaf_index(struct indexblock *ib,
643 int (*handle)(void*, u32, u64, int),
645 void lafs_clear_index(struct indexblock *ib);
646 void lafs_print_uninc(struct uninc *ui);
648 int lafs_allocated_block(struct fs *fs, struct block *blk, u64 phys);
/* Pinning, reservation and dirtying of blocks prior to writeout. */
650 int lafs_pin_dblock(struct datablock *b, int alloc_type);
651 int lafs_reserve_block(struct block *b, int alloc_type);
652 void lafs_dirty_dblock(struct datablock *b);
653 void lafs_erase_dblock(struct datablock *b);
654 int lafs_erase_dblock_async(struct datablock *b);
655 void lafs_dirty_iblock(struct indexblock *b);
656 void block_drop_addr(struct fs *fs, struct inode *ino, u32 addr);
657 void lafs_flush(struct datablock *b);
/*
 * Cluster updates follow a prepare -> pin -> commit (or abort) protocol
 * driven through a struct update_handle (declared above).
 * NOTE(review): the return types of the two commit variants are on
 * lines elided from this extract.
 */
659 bio_end_io_t *lafs_cluster_endio_choose(int which, int header);
660 int lafs_cluster_update_prepare(struct update_handle *uh, struct fs *fs,
662 int lafs_cluster_update_pin(struct update_handle *uh);
664 lafs_cluster_update_commit(struct update_handle *uh,
666 int offset, int len);
668 lafs_cluster_update_commit_buf(struct update_handle *uh, struct fs *fs,
669 struct inode *ino, u32 addr,
670 int offset, int len, const char *str1,
671 int len2, const char *str2);
672 void lafs_cluster_update_abort(struct update_handle *uh);
/* Upper bound on blocks occupied by one cluster head. */
673 #define MAX_CHEAD_BLOCKS 4
/* lafs_cluster_wait(): sleep until write-cluster 0 has passed sequence
 * number 'seq', i.e. the data written under that seq is safe. */
675 static inline void lafs_cluster_wait(struct fs *fs, unsigned long long seq)
677 wait_event(fs->wc[0].pending_wait,
678 fs->wc[0].cluster_seq > seq);
681 void lafs_cluster_wait_all(struct fs *fs);
682 int lafs_cluster_empty(struct fs *fs, int cnum);
685 int lafs_write_state(struct fs *fs);
686 void lafs_destroy_inode(struct inode *inode);
/* Checkpoint control: lock out checkpoints, start one, drive it. */
689 void lafs_checkpoint_lock(struct fs *fs);
690 void lafs_checkpoint_unlock(struct fs *fs);
691 void lafs_checkpoint_unlock_wait(struct fs *fs);
692 unsigned long long lafs_checkpoint_start(struct fs *fs);
693 unsigned long lafs_do_checkpoint(struct fs *fs);
694 struct block *lafs_get_flushable(struct fs *fs, int phase);
/* Orphan handling: datablocks whose inode linkage must be resolved
 * during recovery; _nb is presumably the non-blocking form - confirm. */
696 int lafs_make_orphan(struct fs *fs, struct datablock *db, struct inode *ino);
697 int lafs_make_orphan_nb(struct fs *fs, struct datablock *db, struct inode *ino);
698 void lafs_orphan_release(struct fs *fs, struct datablock *b, struct inode *ino);
699 long lafs_run_orphans(struct fs *fs);
700 void lafs_add_orphan(struct fs *fs, struct datablock *db);
701 void lafs_orphan_forget(struct fs *fs, struct datablock *db);
702 struct datablock *lafs_find_orphan(struct inode *ino);
703 int lafs_count_orphans(struct inode *ino);
704 void lafs_add_orphans(struct fs *fs, struct inode *ino, int count);
/* Segment usage tracking and reference management. */
707 int lafs_prealloc(struct block *b, int type);
708 int lafs_seg_ref_block(struct block *b, int ssnum);
709 void lafs_seg_deref(struct fs *fs, u64 addr, int ssnum);
710 void lafs_add_active(struct fs *fs, u64 addr);
711 void lafs_seg_forget(struct fs *fs, int dev, u32 seg);
712 void lafs_seg_flush_all(struct fs *fs);
713 void lafs_seg_apply_all(struct fs *fs);
714 void lafs_seg_put_all(struct fs *fs);
715 void lafs_empty_segment_table(struct fs *fs);
716 int __must_check lafs_seg_dup(struct fs *fs, int which);
717 void lafs_seg_move(struct fs *fs, u64 oldaddr, u64 newaddr,
718 int ssnum, int phase, int moveref);
719 int lafs_segtrack_init(struct segtracker *st);
720 void lafs_segtrack_free(struct segtracker *st);
721 void lafs_update_youth(struct fs *fs, int dev, u32 seg);
/* Free-space accounting and segment cleaning (GC). */
723 extern int temp_credits;/* debugging */
724 void lafs_free_get(struct fs *fs, unsigned int *dev, u32 *seg,
726 int lafs_get_cleanable(struct fs *fs, u16 *dev, u32 *seg);
728 void lafs_space_return(struct fs *fs, int credits);
729 int lafs_alloc_cleaner_segs(struct fs *fs, int max);
730 int lafs_space_alloc(struct fs *fs, int credits, int why);
731 unsigned long lafs_scan_seg(struct fs *fs);
732 int lafs_clean_count(struct fs *fs, int *any_clean);
733 void lafs_clean_free(struct fs *fs);
736 unsigned long lafs_do_clean(struct fs *fs);
737 void lafs_unclean(struct datablock *db);
739 /* Thread management */
740 int lafs_start_thread(struct fs *fs);
741 void lafs_stop_thread(struct fs *fs);
742 void lafs_wake_thread(struct fs *fs);
743 void lafs_trigger_flush(struct block *b);
/* Write clusters: allocate blocks into a cluster, flush, checksum. */
746 int lafs_cluster_allocate(struct block *b, int cnum);
747 void lafs_cluster_flush(struct fs *fs, int cnum);
748 int lafs_calc_cluster_csum(struct cluster_head *head);
749 int lafs_cluster_init(struct fs *fs, int cnum, u64 addr, u64 prev, u64 seq);
750 void lafs_clusters_done(struct fs *fs);
751 void lafs_done_work(struct work_struct *ws);
752 void lafs_close_all_segments(struct fs *fs);
/* lafs_pin_block(): pin in the filesystem's current phase. */
755 void lafs_pin_block_ph(struct block *b, int ph);
756 static inline void lafs_pin_block(struct block *b)
758 lafs_pin_block_ph(b, fs_from_inode(b->inode)->phase);
761 int lafs_add_block_address(struct fs *fs, struct block *blk);
762 void lafs_flip_dblock(struct datablock *db);
763 void lafs_phase_flip(struct fs *fs, struct indexblock *ib);
/* NOTE(review): lafs_iblock_get's return type is on an elided line. */
764 struct indexblock * __must_check
765 lafs_make_iblock(struct inode *ino, int adopt, int async, REFARG);
767 lafs_iblock_get(struct inode *ino, faddr_t addr, int depth, paddr_t phys, REFARG);
/* Low-level cluster head/block writeout. */
770 void lafs_write_head(struct fs *fs, struct cluster_head *head, u64 virt,
772 void lafs_write_block(struct fs *fs, struct block *b, struct wc *wc);
773 void lafs_write_flush(struct fs *fs, struct wc *wc);
776 int lafs_quota_allocate(struct fs *fs, struct inode *ino, int diff);
/*
 * wait_event_lock(): like wait_event() but the condition is evaluated
 * under 'lock' (a spinlock), which is dropped while sleeping and
 * retaken before re-testing.
 * NOTE(review): several continuation lines of both macros are elided
 * in this extract; no comments are inserted between the remaining
 * backslash-continued lines to avoid altering the macro bodies.
 */
778 #define __wait_event_lock(wq, condition, lock) \
780 wait_queue_t __wait; \
781 init_waitqueue_entry(&__wait, current); \
783 add_wait_queue(&wq, &__wait); \
785 set_current_state(TASK_UNINTERRUPTIBLE); \
788 spin_unlock(&lock); \
792 current->state = TASK_RUNNING; \
793 remove_wait_queue(&wq, &__wait); \
796 #define wait_event_lock(wq, condition, lock) \
800 __wait_event_lock(wq, condition, lock); \