| | | |
|---|---|---|
| author | Jiri Kosina <jkosina@suse.cz> | 2011-05-18 17:06:31 +0200 |
| committer | Jiri Kosina <jkosina@suse.cz> | 2011-05-18 17:06:49 +0200 |
| commit | 6b7b8e488bbdedeccabdd001a78ffcbe43bb8a3a (patch) | |
| tree | f2f77cc31b4548745778fca6a51b09e1d8a49804 /fs/btrfs/ordered-data.c | |
| parent | b50f315cbb865079a16a12fd9ae6083f98fd592c (diff) | |
| parent | c1d10d18c542278b7fbc413c289d3cb6219da6b3 (diff) | |
Merge branch 'master' into upstream.
This is a sync with Linus' tree to receive the KEY_IMAGES definition
that went in through the input tree.
Diffstat (limited to 'fs/btrfs/ordered-data.c')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | fs/btrfs/ordered-data.c | 8 |

1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 083a55477375..a1c940425307 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -202,6 +202,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	INIT_LIST_HEAD(&entry->list);
 	INIT_LIST_HEAD(&entry->root_extent_list);
 
+	trace_btrfs_ordered_extent_add(inode, entry);
+
 	spin_lock(&tree->lock);
 	node = tree_insert(&tree->tree, file_offset,
 			   &entry->rb_node);
@@ -387,6 +389,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 	struct list_head *cur;
 	struct btrfs_ordered_sum *sum;
 
+	trace_btrfs_ordered_extent_put(entry->inode, entry);
+
 	if (atomic_dec_and_test(&entry->refs)) {
 		while (!list_empty(&entry->list)) {
 			cur = entry->list.next;
@@ -420,6 +424,8 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
 	spin_lock(&root->fs_info->ordered_extent_lock);
 	list_del_init(&entry->root_extent_list);
 
+	trace_btrfs_ordered_extent_remove(inode, entry);
+
 	/*
 	 * we have no more ordered extents for this inode and
 	 * no dirty pages. We can safely remove it from the
@@ -585,6 +591,8 @@ void btrfs_start_ordered_extent(struct inode *inode,
 	u64 start = entry->file_offset;
 	u64 end = start + entry->len - 1;
 
+	trace_btrfs_ordered_extent_start(inode, entry);
+
 	/*
 	 * pages in the range can be dirty, clean or writeback. We
 	 * start IO on any dirty ones so the wait doesn't stall waiting
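For context, the eight added lines above are calls into the btrfs tracepoints introduced by this series, declared in include/trace/events/btrfs.h. The sketch below is a minimal, illustrative version of how such a tracepoint family can be declared with the kernel's DECLARE_EVENT_CLASS/DEFINE_EVENT macros; the recorded fields and exact layout are assumptions chosen for the example, not a copy of the real header.

```c
/*
 * Illustrative sketch only -- NOT the verbatim contents of
 * include/trace/events/btrfs.h.  It shows the general shape of the
 * tracepoint machinery that calls such as
 * trace_btrfs_ordered_extent_add() hook into.  The header is expanded
 * in a context where struct inode and struct btrfs_ordered_extent are
 * already fully defined.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM btrfs

#if !defined(_TRACE_BTRFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BTRFS_H

#include <linux/tracepoint.h>

/* One class describes the common payload of the ordered-extent events. */
DECLARE_EVENT_CLASS(btrfs__ordered_extent,

	TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),

	TP_ARGS(inode, ordered),

	TP_STRUCT__entry(
		__field(ino_t, ino)
		__field(u64,   file_offset)
		__field(u64,   len)
	),

	TP_fast_assign(
		__entry->ino         = inode->i_ino;
		__entry->file_offset = ordered->file_offset;
		__entry->len         = ordered->len;
	),

	TP_printk("ino=%lu file_offset=%llu len=%llu",
		  (unsigned long)__entry->ino,
		  (unsigned long long)__entry->file_offset,
		  (unsigned long long)__entry->len)
);

/*
 * Each call site in ordered-data.c gets its own named event;
 * the add/put/remove/start variants all follow this pattern.
 */
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
	TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
	TP_ARGS(inode, ordered)
);

#endif /* _TRACE_BTRFS_H */

/* This include must stay outside the multi-read protection above. */
#include <trace/define_trace.h>
```

Once compiled in, events declared this way can typically be toggled at runtime through tracefs, for example by writing 1 to /sys/kernel/debug/tracing/events/btrfs/btrfs_ordered_extent_add/enable and reading the output from /sys/kernel/debug/tracing/trace.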
