author     Linus Torvalds <torvalds@linux-foundation.org>   2015-11-06 17:17:13 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-11-06 17:17:13 -0800
commit     27eb427bdc0960ad64b72da03e3596c801e7a9e9
tree       4170a265e99d455ca53d26a19e59330e3277fccd   /include/trace
parent     713009809681e5a7871e96e6992692c805b4480b
parent     2959a32a858a2c44bbbce83d19c158d54cc5998a
Merge branch 'for-linus-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs updates from Chris Mason:
"We have a lot of subvolume quota improvements in here, along with big
piles of cleanups from Dave Sterba and Anand Jain and others.
Josef pitched in a batch of allocator fixes based on production use
here at FB. We found that mount -o ssd_spread greatly improved our
performance on hardware raid5/6, but it exposed some CPU bottlenecks
in the allocator. These patches make a huge difference"
* 'for-linus-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (100 commits)
Btrfs: fix hole punching when using the no-holes feature
Btrfs: find_free_extent: Do not erroneously skip LOOP_CACHING_WAIT state
btrfs: Fix a data space underflow warning
btrfs: qgroup: Fix a rebase bug which will cause qgroup double free
btrfs: qgroup: Fix a race in delayed_ref which leads to abort trans
btrfs: clear PF_NOFREEZE in cleaner_kthread()
btrfs: qgroup: Don't copy extent buffer to do qgroup rescan
btrfs: add balance filters limits, stripes and usage to supported mask
btrfs: extend balance filter usage to take minimum and maximum
btrfs: add balance filter for stripes
btrfs: extend balance filter limit to take minimum and maximum
btrfs: fix use after free iterating extrefs
btrfs: check unsupported filters in balance arguments
Btrfs: fix regression running delayed references when using qgroups
Btrfs: fix regression when running delayed references
Btrfs: don't do extra bitmap search in one bit case
Btrfs: keep track of largest extent in bitmaps
Btrfs: don't keep trying to build clusters if we are fragmented
Btrfs: cut down on loops through the allocator
Btrfs: don't continue setting up space cache when enospc
...
Diffstat (limited to 'include/trace')
-rw-r--r--   include/trace/events/btrfs.h   113
1 file changed, 113 insertions(+), 0 deletions(-)
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 0b73af9be12f..b4473dab39d6 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1117,6 +1117,119 @@ DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
 	TP_ARGS(wq)
 );
 
+DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
+
+	TP_PROTO(struct inode *inode, u64 free_reserved),
+
+	TP_ARGS(inode, free_reserved),
+
+	TP_STRUCT__entry(
+		__field(	u64,		rootid		)
+		__field(	unsigned long,	ino		)
+		__field(	u64,		free_reserved	)
+	),
+
+	TP_fast_assign(
+		__entry->rootid		= BTRFS_I(inode)->root->objectid;
+		__entry->ino		= inode->i_ino;
+		__entry->free_reserved	= free_reserved;
+	),
+
+	TP_printk("rootid=%llu, ino=%lu, free_reserved=%llu",
+		  __entry->rootid, __entry->ino, __entry->free_reserved)
+);
+
+DEFINE_EVENT(btrfs__qgroup_data_map, btrfs_qgroup_init_data_rsv_map,
+
+	TP_PROTO(struct inode *inode, u64 free_reserved),
+
+	TP_ARGS(inode, free_reserved)
+);
+
+DEFINE_EVENT(btrfs__qgroup_data_map, btrfs_qgroup_free_data_rsv_map,
+
+	TP_PROTO(struct inode *inode, u64 free_reserved),
+
+	TP_ARGS(inode, free_reserved)
+);
+
+#define BTRFS_QGROUP_OPERATIONS				\
+	{ QGROUP_RESERVE,	"reserve"	},	\
+	{ QGROUP_RELEASE,	"release"	},	\
+	{ QGROUP_FREE,		"free"		}
+
+DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
+
+	TP_PROTO(struct inode *inode, u64 start, u64 len, u64 reserved, int op),
+
+	TP_ARGS(inode, start, len, reserved, op),
+
+	TP_STRUCT__entry(
+		__field(	u64,		rootid		)
+		__field(	unsigned long,	ino		)
+		__field(	u64,		start		)
+		__field(	u64,		len		)
+		__field(	u64,		reserved	)
+		__field(	int,		op		)
+	),
+
+	TP_fast_assign(
+		__entry->rootid		= BTRFS_I(inode)->root->objectid;
+		__entry->ino		= inode->i_ino;
+		__entry->start		= start;
+		__entry->len		= len;
+		__entry->reserved	= reserved;
+		__entry->op		= op;
+	),
+
+	TP_printk("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s",
+		  __entry->rootid, __entry->ino, __entry->start, __entry->len,
+		  __entry->reserved,
+		  __print_flags((unsigned long)__entry->op, "",
+				BTRFS_QGROUP_OPERATIONS)
+	)
+);
+
+DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_reserve_data,
+
+	TP_PROTO(struct inode *inode, u64 start, u64 len, u64 reserved, int op),
+
+	TP_ARGS(inode, start, len, reserved, op)
+);
+
+DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_release_data,
+
+	TP_PROTO(struct inode *inode, u64 start, u64 len, u64 reserved, int op),
+
+	TP_ARGS(inode, start, len, reserved, op)
+);
+
+DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
+
+	TP_PROTO(u64 ref_root, u64 reserved),
+
+	TP_ARGS(ref_root, reserved),
+
+	TP_STRUCT__entry(
+		__field(	u64,	ref_root	)
+		__field(	u64,	reserved	)
+	),
+
+	TP_fast_assign(
+		__entry->ref_root	= ref_root;
+		__entry->reserved	= reserved;
+	),
+
+	TP_printk("root=%llu, reserved=%llu, op=free",
+		  __entry->ref_root, __entry->reserved)
+);
+
+DEFINE_EVENT(btrfs__qgroup_delayed_ref, btrfs_qgroup_free_delayed_ref,
+
+	TP_PROTO(u64 ref_root, u64 reserved),
+
+	TP_ARGS(ref_root, reserved)
+);
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */
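
For context on how these new tracepoints are consumed: each DEFINE_EVENT(class, name, ...) above generates a trace_<name>() helper that kernel code calls at the instrumented site. The sketch below is a hypothetical caller, not one of the actual fs/btrfs call sites in this merge; the function name example_reserve_range and its locals are assumptions for illustration, while the tracepoint signature and the QGROUP_RESERVE constant are taken from the header changes shown in the diff.

#include <trace/events/btrfs.h>

/*
 * Hypothetical caller, for illustration only: emit the new
 * btrfs_qgroup_reserve_data event after reserving a data range.
 * The real call sites live in fs/btrfs and may differ.
 */
static int example_reserve_range(struct inode *inode, u64 start, u64 len)
{
	u64 reserved = len;	/* assume the whole range was reserved */

	/* ... qgroup reservation accounting would happen here ... */

	/* QGROUP_RESERVE is one of the ops listed in BTRFS_QGROUP_OPERATIONS */
	trace_btrfs_qgroup_reserve_data(inode, start, len, reserved,
					QGROUP_RESERVE);
	return 0;
}

Once wired up like this, the event appears under the btrfs trace system, so it can be enabled through tracefs like any other btrfs tracepoint and read back from the trace buffer with the root/ino/start/len/reserved/op fields formatted by TP_printk.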