4 * Copyright (C) 2005-2009
5 * Neil Brown <neilb@suse.de>
6 * Released under the GPL, version 2
9 #include <linux/blkdev.h>
11 #include <linux/highmem.h>
/* Runtime trace switch: when non-zero, dprintk() forwards its arguments
 * to printk(); when zero, tracing compiles to a cheap runtime test. */
16 extern int lafs_trace;
17 #define dprintk(x...) do { if(lafs_trace)printk(x); }while(0)
/* Like BUG_ON(cond), but first logs the offending block (via strblk())
 * together with the file/line of the failed check. */
19 #define LAFS_BUG(cond, b) do { if (cond) { printk(KERN_ERR "%s:%d: %s\n", __FILE__,__LINE__,strblk(b));BUG();}}while(0)
22 extern struct fs *dfs;
23 extern struct freelists {
25 unsigned long freecnt;
27 extern void lafs_dump_tree(void);
/* Optional block-reference tracking.  REFARG adds a string parameter that
 * names each reference; MKREF() stringifies a reference name at a call site.
 * The definitions below appear in two groups - presumably the debug and
 * non-debug arms of a configuration #if whose lines are not visible in
 * this excerpt (TODO confirm against the full source). */
31 #define REFARG char *__refname
33 #define MKREF(name) #name
/* Debug arm: forward every call unchanged, keeping the trailing
 * reference-name argument so lafs_add_ref()/lafs_del_ref() can record it. */
35 #define add_ref(a,b,c,d) lafs_add_ref(a,b,c,d)
36 #define del_ref(a,b,c,d) lafs_del_ref(a,b,c,d)
37 #define has_ref(a,b) lafs_has_ref(a,b)
39 #define lafs_get_block(a,b,c,d,e) _lafs_get_block(a,b,c,d,e)
40 #define first_in_seg(a,b,c,d,e) _first_in_seg(a,b,c,d,e)
41 #define dir_lookup_blk(a,b,c,d,e,f,g) _dir_lookup_blk(a,b,c,d,e,f,g)
42 #define lafs_iblock_alloc(a,b,c,d) _lafs_iblock_alloc(a,b,c,d)
43 #define ihash_lookup(a,b,c,d,e) _ihash_lookup(a,b,c,d,e)
44 #define iblock_get(a,b,c,d,e) _iblock_get(a,b,c,d,e)
45 #define lafs_make_iblock(a,b,c,d) _lafs_make_iblock(a,b,c,d)
46 #define lafs_leaf_find(a,b,c,d,e,f) _lafs_leaf_find(a,b,c,d,e,f)
47 #define lafs_inode_dblock(a,b,c) _lafs_inode_dblock(a,b,c)
48 #define lafs_inode_get_dblock(a,b) _lafs_inode_get_dblock(a,b)
/* Non-debug arm: the tracking calls compile away entirely, and each
 * wrapper drops its final (REFARG) argument before calling the
 * underscore-prefixed worker function. */
54 #define add_ref(a,b,c,d) do {} while (0)
55 #define del_ref(a,b,c,d) do {} while (0)
56 #define has_ref(a,b) (-1)
58 #define lafs_get_block(a,b,c,d,e) _lafs_get_block(a,b,c,d)
59 #define first_in_seg(a,b,c,d,e) _first_in_seg(a,b,c,d)
60 #define dir_lookup_blk(a,b,c,d,e,f,g) _dir_lookup_blk(a,b,c,d,e,f)
61 #define lafs_iblock_alloc(a,b,c,d) _lafs_iblock_alloc(a,b,c)
62 #define ihash_lookup(a,b,c,d,e) _ihash_lookup(a,b,c,d)
63 #define iblock_get(a,b,c,d,e) _iblock_get(a,b,c,d)
64 #define lafs_make_iblock(a,b,c,d) _lafs_make_iblock(a,b,c)
65 #define lafs_leaf_find(a,b,c,d,e,f) _lafs_leaf_find(a,b,c,d,e)
66 #define lafs_inode_dblock(a,b,c) _lafs_inode_dblock(a,b)
67 #define lafs_inode_get_dblock(a,b) _lafs_inode_get_dblock(a)
69 #define strblk(a) lafs_strblk(a)
/* NOTE(review): fragment - the return type and body of u32_after() are not
 * visible in this excerpt; by its name it presumably compares 32-bit
 * sequence numbers modulo wraparound - confirm against the full source. */
72 u32_after(u32 a, u32 b)
/* Externally-defined operations tables for each LAFS inode flavour.
 * Written as "extern const" - placing the storage-class specifier first -
 * because any other placement (the original "const extern") is an
 * obsolescent feature per C11 6.11.5.  Same declarations, same linkage. */
77 extern const struct inode_operations lafs_file_ino_operations;
78 extern const struct inode_operations lafs_dir_ino_operations;
79 extern const struct inode_operations lafs_subset_ino_operations;
80 extern const struct inode_operations lafs_link_ino_operations;
81 extern const struct inode_operations lafs_special_ino_operations;
82 extern const struct file_operations lafs_file_file_operations;
83 extern const struct file_operations lafs_dir_file_operations;
84 extern const struct file_operations lafs_subset_file_operations;
85 extern const struct address_space_operations lafs_file_aops;
86 extern const struct address_space_operations lafs_index_operations;
97 int lafs_sync_page_io(struct block_device *bdev, sector_t sector, int offset,
98 int size, struct page *page, int rw);
99 int lafs_load_page(struct fs *fs, struct page *p, u64 vaddr, int blocks);
100 int lafs_load_page_async(struct fs *fs, struct page *p, u64 vaddr, int blocks,
101 struct async_complete *ac);
102 int __must_check lafs_load_block(struct block *b, struct bio *bio);
103 int __must_check lafs_wait_block(struct block *b);
104 int __must_check lafs_wait_block_async(struct block *b);
105 int __must_check lafs_find_block(struct datablock *b, int adopt);
106 int __must_check lafs_find_block_async(struct datablock *b);
107 int __must_check lafs_read_block(struct datablock *b);
108 int __must_check lafs_read_block_async(struct datablock *b);
109 int __must_check lafs_find_next(struct inode *b, loff_t *bnum);
110 struct indexblock *lafs_leaf_find(struct inode *inode, u32 addr,
111 int adopt, u32 *next, int async, REFARG);
112 u32 lafs_leaf_next(struct indexblock *ib, u32 start);
113 int lafs_index_empty(struct indexblock *ib);
/* Two arms of set_iolock_info() - presumably debug vs non-debug of a
 * config #if whose guard lines are missing from this excerpt (TODO
 * confirm).  The debug arm records who took the IO lock. */
115 #define set_iolock_info(b) ( (b)->iolock_file = __FILE__, (b)->iolock_line = __LINE__)
117 #define set_iolock_info(b) (0)
/* Each iolock wrapper takes the lock via the underscore-prefixed worker
 * and then stamps the lock owner; the *_async forms return 1 on success
 * (stamping only in that case) and 0 if the lock was not acquired. */
119 #define lafs_iolock_block(b) do { _lafs_iolock_block(b); set_iolock_info(b); } while(0)
120 #define lafs_iolock_block_async(b) ( _lafs_iolock_block_async(b) ? ( set_iolock_info(b), 1) : 0)
121 #define lafs_iolock_written(b) do { _lafs_iolock_written(b); set_iolock_info(b); } while(0)
122 #define lafs_iolock_written_async(b) ( _lafs_iolock_written_async(b) ? ( set_iolock_info(b), 1) : 0)
124 void _lafs_iolock_block(struct block *b);
125 void _lafs_iolock_written(struct block *b);
126 int _lafs_iolock_block_async(struct block *b);
127 int _lafs_iolock_written_async(struct block *b);
129 void lafs_iounlock_block(struct block *b);
130 void lafs_iocheck_block(struct datablock *db, int unlock);
131 void lafs_iocheck_writeback(struct datablock *db, int unlock);
132 void lafs_writeback_done(struct block *b);
134 void lafs_super_write(struct fs *fs, int dev, u64 addr, char *buf, int size);
135 int lafs_super_wait(struct fs *fs);
138 int __must_check lafs_mount(struct fs *fs);
139 struct inode *lafs_iget(struct super_block *fs, ino_t inum, int async);
140 struct inode *lafs_iget_fs(struct fs *fs, int fsnum, int inum, int async);
141 int __must_check lafs_import_inode(struct inode *ino, struct datablock *b);
142 void lafs_inode_checkpin(struct inode *ino);
143 void lafs_clear_inode(struct inode *ino);
144 void lafs_delete_inode(struct inode *ino);
145 void lafs_dirty_inode(struct inode *ino);
146 int lafs_sync_inode(struct inode *ino, int wait);
147 struct inode *lafs_new_inode(struct fs *fs, struct super_block *sb,
148 struct inode *dir, int type,
149 int inum, int mode, struct datablock **inodbp);
150 int lafs_lock_inode(struct inode *ino);
151 void lafs_inode_fillblock(struct inode *ino);
152 struct datablock *lafs_inode_dblock(struct inode *ino, int async, REFARG);
153 struct datablock *lafs_inode_get_dblock(struct inode *ino, REFARG);
154 int lafs_inode_handle_orphan(struct datablock *b);
/* NOTE(review): both functions below are fragments - interior lines are
 * missing from this excerpt.  Visible code shows lafs_iput_fs() dropping
 * the superblock active reference via deactivate_super(), and
 * lafs_igrab_fs() taking one by bumping sb->s_active directly. */
156 static inline void lafs_iput_fs(struct inode *ino)
158 struct super_block *sb = ino->i_sb;
160 deactivate_super(sb);
163 static inline void lafs_igrab_fs(struct inode *ino)
166 atomic_inc(&ino->i_sb->s_active);
169 struct datablock *lafs_get_block(struct inode *ino, unsigned long index,
170 struct page *p, int gfp, REFARG);
172 void add_ref(struct block *b, char *ref, char *file, int line);
173 void del_ref(struct block *b, char *ref, char *file, int line);
174 int has_ref(struct block *b, char *ref);
177 int lafs_setattr(struct dentry *dentry, struct iattr *attr);
179 char *strblk(struct block *b);
180 int lafs_print_tree(struct block *b, int depth);
181 int lafs_dev_find(struct fs *fs, u64 virt);
/* NOTE(review): fragment - the return type line and several statements of
 * virttoseg() are missing from this excerpt.  Visible code maps a virtual
 * (filesystem) address to (device, segment, offset-in-segment).  When a
 * segment spans the whole width*stride area the offset is a simple
 * modulus; otherwise the segment is reconstructed from stripe geometry.
 * Remember do_div() modifies its 64-bit dividend in place and returns the
 * 32-bit remainder. */
184 virttoseg(struct fs *fs, u64 virt, int *devp, u32 *segp, u32 *offsetp)
186 int d = lafs_dev_find(fs, virt);
187 struct fs_dev *dv = &fs->devs[d];
190 if (dv->segment_size >= dv->width * dv->stride) {
191 *offsetp = do_div(virt, dv->segment_size);
195 int of = do_div(v2, dv->stride);
198 do_div(v2, dv->width * dv->stride);
201 *segp = (strp * dv->stride + of) /
202 (dv->segment_size / dv->width);
203 *offsetp = virt - dv->segment_stride * *segp;
/* Inverse direction: virtual address of the start of a segment on a
 * device (fragment - braces not visible in this excerpt). */
208 static inline u64 segtovirt(struct fs *fs, int dev, u32 segnum)
210 return fs->devs[dev].start +
211 (u64)fs->devs[dev].segment_stride * segnum;
/* NOTE(review): fragment - return type and several lines missing.  Tests
 * whether virtual address 'virt' lies inside segment 'seg' of device 'd';
 * first rejects addresses outside the device's [start, start+size) range,
 * then recomputes the segment number with the same geometry arithmetic as
 * virttoseg(). */
215 in_seg(struct fs *fs, int d, u32 seg, u64 virt)
217 struct fs_dev *dv = &fs->devs[d];
221 if (virt < dv->start ||
222 virt >= dv->start + dv->size)
226 if (dv->segment_size >= dv->width * dv->stride) {
227 do_div(virt, dv->segment_size)
231 int of = do_div(v2, dv->stride);
234 do_div(v2, dv->width * dv->stride);
237 return seg == ((strp * dv->stride + of) /
238 (dv->segment_size / dv->width));
/* NOTE(review): fragment.  Converts a virtual block address to a physical
 * 512-byte sector on the owning device: shift by (blocksize_bits - 9)
 * converts blocks to sectors, then the device's segment_offset (bytes,
 * hence >>9) is added. */
243 virttophys(struct fs *fs, u64 virt, int *devp, sector_t *sectp)
245 int d = lafs_dev_find(fs, virt);
246 struct fs_dev *dv = &fs->devs[d];
252 virt <<= (fs->blocksize_bits - 9);
253 virt += (dv->segment_offset)>>9;
/* Byte offset of a data block within its page: page->private points at the
 * array of struct datablock for the page, so pointer difference gives the
 * block index, scaled by the inode's block size.  (Fragment - braces not
 * visible in this excerpt.) */
257 static inline int dblock_offset(struct datablock *b)
259 return (b - (struct datablock*)b->page->private)
260 << b->b.inode->i_blkbits;
/* Same calculation for index blocks, whose page pointer lives in b->b. */
264 static inline int iblock_offset(struct indexblock *b)
266 return (b - (struct indexblock*)b->b.page->private)
267 << b->b.inode->i_blkbits;
/* Dispatch on B_Index to pick the right offset helper for a generic block. */
270 static inline int block_offset(struct block *b)
272 if (test_bit(B_Index, &b->flags))
273 return iblock_offset(iblk(b));
275 return dblock_offset(dblk(b));
/* Atomic-kmap helpers for block payloads (fragments - some lines missing
 * from this excerpt).  map_dblock()/unmap_dblock() use the KM_USER0 slot;
 * the *_2 variants use KM_USER1 so two blocks can be mapped at once.
 * Each map returns a pointer already advanced to the block's offset
 * within its page; unmap must therefore subtract the same offset. */
279 static inline void *map_dblock(struct datablock *b)
281 void *a = kmap_atomic(b->page, KM_USER0);
282 a += dblock_offset(b);
286 static inline void unmap_dblock(struct datablock *b, void *buf)
288 kunmap_atomic(buf - dblock_offset(b), KM_USER0);
291 static inline void *map_dblock_2(struct datablock *b)
293 void *a = kmap_atomic(b->page, KM_USER1);
294 a += dblock_offset(b);
298 static inline void unmap_dblock_2(struct datablock *b, void *buf)
300 kunmap_atomic(buf - dblock_offset(b), KM_USER1);
/* Index-block mapping requires the IO lock to be held; an InoIdx block has
 * no data of its own - its bytes live in the inode's dblock, so mapping is
 * delegated there. */
303 static inline void *map_iblock(struct indexblock *b)
305 LAFS_BUG(!test_bit(B_IOLock, &b->b.flags), &b->b);
306 if (test_bit(B_InoIdx, &b->b.flags))
307 return map_dblock(LAFSI(b->b.inode)->dblock);
311 static inline void unmap_iblock(struct indexblock *b, void *buf)
313 if (test_bit(B_InoIdx, &b->b.flags))
314 unmap_dblock(LAFSI(b->b.inode)->dblock, buf);
317 static inline void *map_iblock_2(struct indexblock *b)
319 if (test_bit(B_InoIdx, &b->b.flags))
320 return map_dblock_2(LAFSI(b->b.inode)->dblock);
324 static inline void unmap_iblock_2(struct indexblock *b, void *buf)
326 if (test_bit(B_InoIdx, &b->b.flags))
327 unmap_dblock_2(LAFSI(b->b.inode)->dblock, buf);
/* Unpack a 64-bit on-disk timestamp (fragment - some lines missing). */
330 static inline void decode_time(struct timespec *ts, u64 te)
332 /* low 35 bits are seconds (~800 years of range)
333 * high bits hold nanoseconds in units of 2 ns (LSB always clear)
335 ts->tv_sec = te& (0x7FFFFFFFFULL);
336 ts->tv_nsec = (te>>34) & ~(long)1;
/* Inverse packing: mask seconds to 35 bits, clear the low nanosecond bit.
 * (Fragment - the final combine/return lines are not visible here.) */
339 static inline u64 encode_time(struct timespec *ts)
343 t &= (0x7FFFFFFFFULL);
344 tn = ts->tv_nsec & ~(long)1;
349 /* s_fs_info points to an allocated sb_key structure */
/* Accessors from a VFS superblock to the LAFS filesystem / root inode via
 * the sb_key stored in s_fs_info (fragments - return statements for the
 * first two functions are not visible in this excerpt). */
355 static inline struct fs *fs_from_sb(struct super_block *sb)
357 struct sb_key *k = sb->s_fs_info;
361 static inline struct inode *ino_from_sb(struct super_block *sb)
363 struct sb_key *k = sb->s_fs_info;
367 static inline struct fs *fs_from_inode(struct inode *ino)
369 return fs_from_sb(ino->i_sb);
/* NOTE(review): fragment - several lines missing.  Sets or clears
 * B_Phase1 on a block according to 'ph', with trace output for the root
 * block (ino 0, fileaddr 0).  A block newly entering the Pinned state
 * also bumps its parent's per-phase pin count. */
372 static inline int set_phase(struct block *b, int ph)
374 if (b->inode->i_ino == 0 && b->fileaddr == 0)
375 dprintk("SETPHASE %s to %d\n", strblk(b), ph);
377 set_bit(B_Phase1, &b->flags);
379 clear_bit(B_Phase1, &b->flags);
381 /* FIXME do I need to lock access to ->parent */
382 if (!test_and_set_bit(B_Pinned, &b->flags) &&
384 atomic_inc(&b->parent->pincnt[ph]);
391 * db->my_inode is protected by rcu. We can 'get' it and
392 * remain rcu_protected, or 'iget' it and be protected by a
/* Fragment: returns the inode stored in an inode-file data block, valid
 * only under RCU; non-inode-file blocks yield early (visible check). */
395 static inline struct inode *rcu_my_inode(struct datablock *db)
400 if (LAFSI(db->b.inode)->type != TypeInodeFile)
403 ino = rcu_dereference(db->my_inode);
/* Fragment - body not visible; pairs with rcu_my_inode() above. */
409 static inline void rcu_iput(struct inode *ino)
/* Fragment: upgrades the RCU-protected reference to a counted one. */
415 static inline struct inode *iget_my_inode(struct datablock *db)
417 struct inode *ino = rcu_my_inode(db);
418 struct inode *rv = NULL;
426 * blocks (data and index) are reference counted.
427 * 'getref' increments the reference count, and could remove the
428 * block from any 'lru' list. However to save effort, we simply
429 * treat anything on an lru list which has a non-zero reference
430 * count as invisible.
431 * 'putref' drops the count and calls lafs_refile to see if anything
434 int lafs_is_leaf(struct block *b, int ph);
435 void lafs_refile(struct block *b, int dec);
437 extern struct indexblock *lafs_getiref_locked(struct indexblock *ib);
439 extern spinlock_t lafs_hash_lock;
/* Raw increment of a block's refcount (fragment - null-check/return lines
 * are not visible in this excerpt). */
441 static inline struct block *__getref(struct block *b)
444 atomic_inc(&b->refcnt);
/* Checked get: taking a reference on a block whose count is already zero
 * is a bug - such a block may be mid-teardown. */
448 static inline struct block *_getref(struct block *b)
450 LAFS_BUG(b && atomic_read(&b->refcnt) == 0, b);
/* Locked variants: each asserts the specific lock that protects the
 * uncounted reference being converted (see the locking notes below).
 * InoIdx blocks are redirected through lafs_getiref_locked(). */
454 static inline struct datablock *_getdref_locked(struct datablock *b)
456 LAFS_BUG(!spin_is_locked(&b->b.inode->i_data.private_lock), &b->b);
461 static inline struct block *_getref_locked(struct block *b)
463 LAFS_BUG(!spin_is_locked(&b->inode->i_data.private_lock), b);
464 if (test_bit(B_InoIdx, &b->flags))
465 return &lafs_getiref_locked(iblk(b))->b;
470 static inline struct indexblock *_getiref_locked_needsync(struct indexblock *b)
472 LAFS_BUG(!spin_is_locked(&lafs_hash_lock), &b->b);
473 if (test_bit(B_InoIdx, &b->b.flags))
474 return lafs_getiref_locked(b);
479 static inline struct block *_getref_locked_needsync(struct block *b)
481 LAFS_BUG(!spin_is_locked(&fs_from_inode(b->inode)->lock), b);
482 if (test_bit(B_InoIdx, &b->flags))
483 return &lafs_getiref_locked(iblk(b))->b;
/* Drop a reference; dropping below zero is a bug (fragment - the actual
 * decrement/refile lines are not visible in this excerpt). */
488 static inline void _putref(struct block *b)
492 BUG_ON(atomic_read(&b->refcnt)==0);
/* _reflog wraps a get/put helper 'c' with optional reference logging:
 * the first arm (no logging) and the ({ ... }) arm are presumably the
 * non-debug/debug halves of a config #if whose lines are missing here. */
497 #define _reflog(c,blk,ref,add) (c(blk))
499 #define _reflog(c,blk,ref,add) ({ !blk ? 0 : \
500 add? add_ref(blk,ref,__FILE__,__LINE__) : \
501 del_ref(blk,ref, __FILE__,__LINE__); c(blk); })
/* _refxlog is the same for datablock/indexblock pointers, logging against
 * the embedded struct block (&(blk)->b). */
502 #define _refxlog(c,blk,ref,add) ({ \
503 add? add_ref(&(blk)->b,ref,__FILE__,__LINE__) : \
504 del_ref(&(blk)->b,ref, __FILE__,__LINE__); c(blk); })
/* Public get/put entry points; getdref/getiref operate on datablock and
 * indexblock pointers and return the typed pointer they were given. */
506 #define getref(blk, r) ( ({BUG_ON((blk) && ! atomic_read(&(blk)->refcnt));}), _reflog(_getref, blk,r,1))
507 #define getref_locked(blk, r) _reflog(_getref_locked, blk,r,1)
508 #define getdref_locked(blk, r) _refxlog(_getdref_locked, blk,r,1)
509 #define getref_locked_needsync(blk, r) _reflog(_getref_locked_needsync, blk,r,1)
510 #define getiref_locked_needsync(blk, r) _refxlog(_getiref_locked_needsync, blk,r,1)
511 #define putref(blk, r) _reflog(_putref, blk,r,0)
512 #define getdref(blk, r) ( ({BUG_ON((blk) && ! atomic_read(&(blk)->b.refcnt));}), _reflog(_getref, (&(blk)->b),r,1),blk)
513 #define getiref(blk, r) ( ({BUG_ON((blk) && ! atomic_read(&(blk)->b.refcnt));}), _reflog(_getref, (&(blk)->b),r,1),blk)
514 #define putdref(blk, r) ({ if (blk) _reflog(_putref, (&(blk)->b),r,0);})
515 #define putiref(blk, r) ({ if (blk) _reflog(_putref, (&(blk)->b),r,0);})
517 /* When we get a ref on a block other than under b->inode->i_data.private_lock, we
518 * need to call this to ensure that lafs_refile isn't still handling a refcnt->0 transition.
/* The empty lock/unlock pair is a deliberate barrier: it cannot return
 * until any critical section currently holding private_lock has exited. */
520 static inline void sync_ref(struct block *b)
522 spin_lock(&b->inode->i_data.private_lock);
523 spin_unlock(&b->inode->i_data.private_lock);
527 * Notes on locking and block references.
528 * There are several places that can hold uncounted references to blocks.
529 * When we try to convert such a reference to a counted reference we need to be careful.
530 * We must increment the refcount under a lock that protects the particular reference, and
531 * then must call sync_ref above to ensure our new reference is safe.
532 * When the last counted reference on a block is dropped (in lafs_refile) we hold private_lock
533 * while cleaning up and sync_ref assures that cleanup is finished.
534 * Before we can free a block we must synchronise with these other locks and
535 * then ensure that the refcount is still zero.
536 * For index blocks that means taking lafs_hash_lock and ensuring the count is still zero.
537 * For most data blocks we need to remove from the lru list using fs->lock, and check the count is
539 * For inode data blocks we need to follow the inode and break the ->dblock lock under the inode's
540 * own lock - but only if the refcount is still zero.
542 * The list of non-counted references and the locks which protect them is:
544 * index hash table lafs_hash_lock
545 * index freelist lafs_hash_lock
546 * phase_leafs / clean_leafs fs->lock
547 * inode->iblock lafs_hash_lock
548 * inode->dblock inode->i_data.private_lock
549 * inode->free_index lafs_hash_lock
550 * page->private page lock
552 * Lock ordering among these is:
553 * inode->i_data.private_lock
554 * LAFSI(inode)->dblock->inode->i_data.private_lock
560 int __must_check lafs_setparent(struct datablock *blk);
563 * extract little-endian values out of memory.
564 * Each function is given a char*, and moves it forwards
/* WARNING: these are statement-expression macros that evaluate and
 * advance 'p' multiple times - pass a plain pointer lvalue, never an
 * expression with side effects. */
567 #define decode16(p) ({ unsigned int _a; _a= (unsigned char)*(p++); \
568 _a + (((unsigned char)*p++)<<8); })
569 #define decode32(p) ({ u32 _b; _b = decode16(p); _b + ((u32)decode16(p)<<16); })
570 #define decode48(p) ({ u64 _c; _c = decode32(p); _c + ((u64)decode16(p)<<32); })
/* Encoders emit little-endian bytes and likewise advance 'p'. */
572 #define encode16(p,n) ({ *(p++) = (n)&255; *(p++) = ((n)>>8) & 255; })
573 #define encode32(p,n) ({ encode16(p,n); encode16(p, ((n)>>16)); })
574 #define encode48(p,n) ({ encode32(p,n); encode16(p, ((n)>>32)); })
/* Integer ceiling division; mirrors the kernel's own DIV_ROUND_UP. */
576 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
/* NOTE(review): fragment - declaration of 'space' and the return line are
 * not visible.  Computes directory space (in 1<<psz units) needed for a
 * name of 'len' bytes: the chainoffset costs 4 extra bytes above 255,
 * 1 extra byte above 1, plus the dirpiece header up to its name field. */
578 static inline int space_needed(int len, int chainoffset, int psz)
581 space = len + (chainoffset > 255 ? 4 : chainoffset > 1 ? 1 : 0);
582 space += offsetof(struct dirpiece, name);
583 space = DIV_ROUND_UP(space, 1<<psz);
594 struct update_handle {
599 void lafs_dir_print(char *buf, int psz);
600 int lafs_dir_blk_size(char *block, int psz);
601 void lafs_dir_clearparent(struct datablock *b);
602 struct block *lafs_dir_drop_parent(struct datablock *b);
604 int lafs_hash_name(u32 seed, int len, const char *name);
605 int lafs_dir_find(char *block, int psz, u32 seed, u32 hash,
607 int lafs_dir_findfirst(char *block, int psz);
608 void lafs_dir_init_block(char *block, int psz, const char *name, int len,
609 u32 target, int type, int chainoffset);
610 #define DT_TEST 128 /* for lafs_dir_add_ent, this flags to test for space,
611 * but not do any actual add
613 #define DT_INTERNAL 129 /* Flags that this is an internal dir block */
614 int lafs_dir_add_ent(char *block, int psz, const char *name, int len,
615 u32 target, int type, u32 seed, u32 hash, int chainoffset);
616 int lafs_dir_del_ent(char *block, int psz, u32 seed, u32 hash);
617 int lafs_dir_next_ent(char *block, int psz, int *pp, char *name, u32 *target,
619 void lafs_dir_repack(char *block, int psz, char *new, u32 seed, int merge);
620 void dir_get_prefix(char *b1, char *b2, int psz, char *prefix);
621 void lafs_dir_split(char *orig, int psz, char *new1, char *new2, const char *name,
622 u32 target, int type, u32 *newhash, u32 seed, u32 hash,
624 /*void dir_settarget(char *block, int psz, int piece, u32 target);*/
625 struct dir_ent *lafs_dir_extract(char *block, int psz, struct dir_ent *de,
626 int pnum, u32 *hash);
627 void lafs_dir_set_target(char *block, int psz, struct dir_ent *de, int pnum);
628 int lafs_dir_empty(char *block);
629 void lafs_dir_make_index(char *orig, char *new, int psz, u32 target);
631 int lafs_dir_handle_orphan(struct datablock *db);
633 int lafs_release_page(struct page *page, gfp_t gfp_flags);
634 void lafs_invalidate_page(struct page *page, unsigned long offset);
636 extern struct file_system_type lafs_fs_type;
637 extern struct file_system_type lafs_snap_fs_type;
639 int lafs_ihash_init(void);
640 void lafs_ihash_free(void);
641 void lafs_release_index(struct list_head *head);
642 struct indexblock *lafs_iblock_alloc(struct fs *fs, int gfp, int with_buffer,
644 void lafs_iblock_free(struct indexblock *ib);
645 void lafs_hash_iblock(struct indexblock *ib);
646 void lafs_unhash_iblock(struct indexblock *ib);
648 void lafs_summary_update(struct fs *fs, struct inode *ino,
649 u64 oldphys, u64 newphys, int is_index, int phase,
651 int lafs_summary_allocate(struct fs *fs, struct inode *ino, int diff);
652 void lafs_qcommit(struct fs *fs, struct inode *ino, int diff, int phase);
653 void lafs_incorporate(struct fs *fs, struct indexblock *ib);
654 void lafs_walk_leaf_index(struct indexblock *ib,
655 int (*handle)(void*, u32, u64, int),
657 void lafs_clear_index(struct indexblock *ib);
658 void lafs_print_uninc(struct uninc *ui);
660 int lafs_allocated_block(struct fs *fs, struct block *blk, u64 phys);
662 int lafs_pin_dblock(struct datablock *b, int alloc_type);
663 int lafs_reserve_block(struct block *b, int alloc_type);
664 void lafs_dirty_dblock(struct datablock *b);
665 void lafs_erase_dblock(struct datablock *b);
666 int lafs_erase_dblock_async(struct datablock *b);
667 void lafs_dirty_iblock(struct indexblock *b, int want_realloc);
668 void block_drop_addr(struct fs *fs, struct inode *ino, u32 addr);
669 void lafs_flush(struct datablock *b);
671 bio_end_io_t *lafs_cluster_endio_choose(int which, int header);
672 int lafs_cluster_update_prepare(struct update_handle *uh, struct fs *fs,
674 int lafs_cluster_update_pin(struct update_handle *uh);
676 lafs_cluster_update_commit(struct update_handle *uh,
678 int offset, int len);
680 lafs_cluster_update_commit_buf(struct update_handle *uh, struct fs *fs,
681 struct inode *ino, u32 addr,
682 int offset, int len, const char *str1,
683 int len2, const char *str2);
684 void lafs_cluster_update_abort(struct update_handle *uh);
/* Maximum number of blocks a cluster head may occupy. */
685 #define MAX_CHEAD_BLOCKS 4
/* Block until write-cluster 0 has progressed past 'seq' (fragment -
 * braces are not visible in this excerpt). */
687 static inline void lafs_cluster_wait(struct fs *fs, unsigned long long seq)
689 wait_event(fs->wc[0].pending_wait,
690 fs->wc[0].cluster_seq > seq);
693 void lafs_cluster_wait_all(struct fs *fs);
694 int lafs_cluster_empty(struct fs *fs, int cnum);
697 int lafs_write_state(struct fs *fs);
698 void lafs_destroy_inode(struct inode *inode);
699 struct super_block *lafs_get_subset_sb(struct inode *ino);
702 void lafs_checkpoint_lock(struct fs *fs);
703 void lafs_checkpoint_unlock(struct fs *fs);
704 void lafs_checkpoint_unlock_wait(struct fs *fs);
705 unsigned long long lafs_checkpoint_start(struct fs *fs);
706 unsigned long lafs_do_checkpoint(struct fs *fs);
707 struct block *lafs_get_flushable(struct fs *fs, int phase);
709 int lafs_make_orphan(struct fs *fs, struct datablock *db, struct inode *ino);
710 int lafs_make_orphan_nb(struct fs *fs, struct datablock *db, struct inode *ino);
711 void lafs_orphan_release(struct fs *fs, struct datablock *b, struct inode *ino);
712 long lafs_run_orphans(struct fs *fs);
713 void lafs_add_orphan(struct fs *fs, struct datablock *db);
714 void lafs_orphan_forget(struct fs *fs, struct datablock *db);
715 struct datablock *lafs_find_orphan(struct inode *ino);
716 int lafs_count_orphans(struct inode *ino);
717 void lafs_add_orphans(struct fs *fs, struct inode *ino, int count);
720 int lafs_prealloc(struct block *b, int type);
721 int lafs_seg_ref_block(struct block *b, int ssnum);
722 void lafs_seg_deref(struct fs *fs, u64 addr, int ssnum);
723 void lafs_add_active(struct fs *fs, u64 addr);
724 void lafs_seg_forget(struct fs *fs, int dev, u32 seg);
725 void lafs_seg_flush_all(struct fs *fs);
726 void lafs_seg_apply_all(struct fs *fs);
727 void lafs_seg_put_all(struct fs *fs);
728 void lafs_empty_segment_table(struct fs *fs);
729 int __must_check lafs_seg_dup(struct fs *fs, int which);
730 void lafs_seg_move(struct fs *fs, u64 oldaddr, u64 newaddr,
731 int ssnum, int phase, int moveref);
732 int lafs_segtrack_init(struct segtracker *st);
733 void lafs_segtrack_free(struct segtracker *st);
734 void lafs_update_youth(struct fs *fs, int dev, u32 seg);
736 extern int temp_credits;/* debugging */
737 void lafs_free_get(struct fs *fs, unsigned int *dev, u32 *seg,
739 int lafs_get_cleanable(struct fs *fs, u16 *dev, u32 *seg);
741 void lafs_space_return(struct fs *fs, int credits);
742 int lafs_alloc_cleaner_segs(struct fs *fs, int max);
743 int lafs_space_alloc(struct fs *fs, int credits, int why);
744 unsigned long lafs_scan_seg(struct fs *fs);
745 int lafs_clean_count(struct fs *fs, int *any_clean);
746 void lafs_clean_free(struct fs *fs);
749 unsigned long lafs_do_clean(struct fs *fs);
750 void lafs_unclean(struct datablock *db);
752 /* Thread management */
753 int lafs_start_thread(struct fs *fs);
754 void lafs_stop_thread(struct fs *fs);
755 void lafs_wake_thread(struct fs *fs);
756 void lafs_trigger_flush(struct block *b);
759 int lafs_cluster_allocate(struct block *b, int cnum);
760 void lafs_cluster_flush(struct fs *fs, int cnum);
761 int lafs_calc_cluster_csum(struct cluster_head *head);
762 int lafs_cluster_init(struct fs *fs, int cnum, u64 addr, u64 prev, u64 seq);
763 void lafs_clusters_done(struct fs *fs);
764 void lafs_done_work(struct work_struct *ws);
765 void lafs_close_all_segments(struct fs *fs);
768 void lafs_pin_block_ph(struct block *b, int ph);
/* Convenience wrapper: pin a block in the filesystem's current phase
 * (fragment - braces are not visible in this excerpt). */
769 static inline void lafs_pin_block(struct block *b)
771 lafs_pin_block_ph(b, fs_from_inode(b->inode)->phase);
774 int lafs_add_block_address(struct fs *fs, struct block *blk);
775 void lafs_flip_dblock(struct datablock *db);
776 void lafs_phase_flip(struct fs *fs, struct indexblock *ib);
777 struct indexblock * __must_check
778 lafs_make_iblock(struct inode *ino, int adopt, int async, REFARG);
780 lafs_iblock_get(struct inode *ino, faddr_t addr, int depth, paddr_t phys, REFARG);
783 void lafs_write_head(struct fs *fs, struct cluster_head *head, u64 virt,
785 void lafs_write_block(struct fs *fs, struct block *b, struct wc *wc);
786 void lafs_write_flush(struct fs *fs, struct wc *wc);
789 int lafs_quota_allocate(struct fs *fs, struct inode *ino, int diff);
/* NOTE(review): fragment - several continuation lines of these multi-line
 * macros are missing from this excerpt, so comments cannot be placed
 * inside them.  Visible shape: a wait_event() variant that drops the
 * given spinlock while sleeping uninterruptibly on 'wq' and retests
 * 'condition'; wait_event_lock() presumably short-circuits when the
 * condition already holds - confirm against the full source. */
791 #define __wait_event_lock(wq, condition, lock) \
793 wait_queue_t __wait; \
794 init_waitqueue_entry(&__wait, current); \
796 add_wait_queue(&wq, &__wait); \
798 set_current_state(TASK_UNINTERRUPTIBLE); \
801 spin_unlock(&lock); \
805 current->state = TASK_RUNNING; \
806 remove_wait_queue(&wq, &__wait); \
809 #define wait_event_lock(wq, condition, lock) \
813 __wait_event_lock(wq, condition, lock); \