author     Linus Torvalds  2015-04-18 11:26:46 -0400
committer  Linus Torvalds  2015-04-18 11:26:46 -0400
commit     96b90f27bcf22f1d06cc16d9475cefa6ea4c4718 (patch)
tree       a886ad5f611dea36c6d4b615dfdcdbbcf5bd3135 /include/trace
parent     396c9df2231865ef55aa031e3f5df9d99e036869 (diff)
parent     0c99241c93b8060441f3c8434848e54b5338f922 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"This update has mostly fixes, but also other bits:
- perf tooling fixes
- PMU driver fixes
- Intel Broadwell PMU driver HW-enablement for LBR callstacks
- a late-arriving 'perf kmem' tool update that enables it to also
  analyze page allocation data. Note, this comes with MM tracepoint
  changes that we believe do not break anything: they change the
  formerly opaque 'struct page *' field that uniquely identifies
  pages to a 'pfn', which also identifies pages uniquely but isn't
  opaque and can be used for other purposes as well"
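The MM tracepoint change described in the last bullet follows a single pattern, visible in the full diff further below: the ring-buffer entry stores an unsigned long pfn instead of a struct page *, and the TP_printk() output recomputes the pointer with pfn_to_page(), so consumers keep seeing the same page=.../pfn=... fields. A minimal sketch of the converted mm_page_free event, assembled from the kmem.h hunks below with explanatory comments added (the TP_PROTO line is assumed here, since the hunks start at TP_ARGS):

    TRACE_EVENT(mm_page_free,

        /* Assumed prototype; the diff hunks below begin at TP_ARGS. */
        TP_PROTO(struct page *page, unsigned int order),

        TP_ARGS(page, order),

        TP_STRUCT__entry(
            __field( unsigned long, pfn )   /* was: struct page *page */
            __field( unsigned int, order )
        ),

        TP_fast_assign(
            /* Store the page frame number, not the kernel pointer. */
            __entry->pfn   = page_to_pfn(page);
            __entry->order = order;
        ),

        /* Output is unchanged for consumers: the pointer is recomputed from the pfn. */
        TP_printk("page=%p pfn=%lu order=%d",
                pfn_to_page(__entry->pfn),
                __entry->pfn,
                __entry->order)
    );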
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/x86/intel/pt: Fix and clean up error handling in pt_event_add()
perf/x86/intel: Add Broadwell support for the LBR callstack
perf/x86/intel/rapl: Fix energy counter measurements by supporting per domain energy units
perf/x86/intel: Fix Core2,Atom,NHM,WSM cycles:pp events
perf/x86: Fix hw_perf_event::flags collision
perf probe: Fix segfault when probe with lazy_line to file
perf probe: Find compilation directory path for lazy matching
perf probe: Set retprobe flag when probe in address-based alternative mode
perf kmem: Analyze page allocator events also
tracing, mm: Record pfn instead of pointer to struct page
Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/filemap.h |  8
-rw-r--r--  include/trace/events/kmem.h    | 42
-rw-r--r--  include/trace/events/vmscan.h  |  8
3 files changed, 29 insertions(+), 29 deletions(-)
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 0421f49a20f7..42febb6bc1d5 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -18,14 +18,14 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
     TP_ARGS(page),

     TP_STRUCT__entry(
-        __field(struct page *, page)
+        __field(unsigned long, pfn)
         __field(unsigned long, i_ino)
         __field(unsigned long, index)
         __field(dev_t, s_dev)
     ),

     TP_fast_assign(
-        __entry->page = page;
+        __entry->pfn = page_to_pfn(page);
         __entry->i_ino = page->mapping->host->i_ino;
         __entry->index = page->index;
         if (page->mapping->host->i_sb)
@@ -37,8 +37,8 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
     TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
         MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
         __entry->i_ino,
-        __entry->page,
-        page_to_pfn(__entry->page),
+        pfn_to_page(__entry->pfn),
+        __entry->pfn,
         __entry->index << PAGE_SHIFT)
 );

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 4ad10baecd4d..81ea59812117 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -154,18 +154,18 @@ TRACE_EVENT(mm_page_free,
     TP_ARGS(page, order),

     TP_STRUCT__entry(
-        __field( struct page *, page )
+        __field( unsigned long, pfn )
         __field( unsigned int, order )
     ),

     TP_fast_assign(
-        __entry->page = page;
+        __entry->pfn = page_to_pfn(page);
         __entry->order = order;
     ),

     TP_printk("page=%p pfn=%lu order=%d",
-            __entry->page,
-            page_to_pfn(__entry->page),
+            pfn_to_page(__entry->pfn),
+            __entry->pfn,
             __entry->order)
 );

@@ -176,18 +176,18 @@ TRACE_EVENT(mm_page_free_batched,
     TP_ARGS(page, cold),

     TP_STRUCT__entry(
-        __field( struct page *, page )
+        __field( unsigned long, pfn )
         __field( int, cold )
     ),

     TP_fast_assign(
-        __entry->page = page;
+        __entry->pfn = page_to_pfn(page);
         __entry->cold = cold;
     ),

     TP_printk("page=%p pfn=%lu order=0 cold=%d",
-        __entry->page,
-        page_to_pfn(__entry->page),
+        pfn_to_page(__entry->pfn),
+        __entry->pfn,
         __entry->cold)
 );

@@ -199,22 +199,22 @@ TRACE_EVENT(mm_page_alloc,
     TP_ARGS(page, order, gfp_flags, migratetype),

     TP_STRUCT__entry(
-        __field( struct page *, page )
+        __field( unsigned long, pfn )
         __field( unsigned int, order )
         __field( gfp_t, gfp_flags )
         __field( int, migratetype )
     ),

     TP_fast_assign(
-        __entry->page = page;
+        __entry->pfn = page ? page_to_pfn(page) : -1UL;
         __entry->order = order;
         __entry->gfp_flags = gfp_flags;
         __entry->migratetype = migratetype;
     ),

     TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
-        __entry->page,
-        __entry->page ? page_to_pfn(__entry->page) : 0,
+        __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+        __entry->pfn != -1UL ? __entry->pfn : 0,
         __entry->order,
         __entry->migratetype,
         show_gfp_flags(__entry->gfp_flags))

@@ -227,20 +227,20 @@ DECLARE_EVENT_CLASS(mm_page,
     TP_ARGS(page, order, migratetype),

     TP_STRUCT__entry(
-        __field( struct page *, page )
+        __field( unsigned long, pfn )
         __field( unsigned int, order )
         __field( int, migratetype )
     ),

     TP_fast_assign(
-        __entry->page = page;
+        __entry->pfn = page ? page_to_pfn(page) : -1UL;
         __entry->order = order;
         __entry->migratetype = migratetype;
     ),

     TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
-        __entry->page,
-        __entry->page ? page_to_pfn(__entry->page) : 0,
+        __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+        __entry->pfn != -1UL ? __entry->pfn : 0,
         __entry->order,
         __entry->migratetype,
         __entry->order == 0)

@@ -260,7 +260,7 @@ DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
     TP_ARGS(page, order, migratetype),

     TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
-        __entry->page, page_to_pfn(__entry->page),
+        pfn_to_page(__entry->pfn), __entry->pfn,
         __entry->order, __entry->migratetype)
 );

@@ -275,7 +275,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
         alloc_migratetype, fallback_migratetype),

     TP_STRUCT__entry(
-        __field( struct page *, page )
+        __field( unsigned long, pfn )
         __field( int, alloc_order )
         __field( int, fallback_order )
         __field( int, alloc_migratetype )
@@ -284,7 +284,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
     ),

     TP_fast_assign(
-        __entry->page = page;
+        __entry->pfn = page_to_pfn(page);
         __entry->alloc_order = alloc_order;
         __entry->fallback_order = fallback_order;
         __entry->alloc_migratetype = alloc_migratetype;
@@ -294,8 +294,8 @@ TRACE_EVENT(mm_page_alloc_extfrag,
     ),

     TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
-        __entry->page,
-        page_to_pfn(__entry->page),
+        pfn_to_page(__entry->pfn),
+        __entry->pfn,
         __entry->alloc_order,
         __entry->fallback_order,
         pageblock_order,

diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 69590b6ffc09..f66476b96264 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -336,18 +336,18 @@ TRACE_EVENT(mm_vmscan_writepage,
     TP_ARGS(page, reclaim_flags),

     TP_STRUCT__entry(
-        __field(struct page *, page)
+        __field(unsigned long, pfn)
         __field(int, reclaim_flags)
     ),

     TP_fast_assign(
-        __entry->page = page;
+        __entry->pfn = page_to_pfn(page);
         __entry->reclaim_flags = reclaim_flags;
     ),

     TP_printk("page=%p pfn=%lu flags=%s",
-        __entry->page,
-        page_to_pfn(__entry->page),
+        pfn_to_page(__entry->pfn),
+        __entry->pfn,
         show_reclaim_flags(__entry->reclaim_flags))
 );
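One detail worth noting in the kmem.h hunks above: mm_page_alloc and the mm_page event class can be invoked with a NULL page (a failed allocation), so the assignment cannot unconditionally call page_to_pfn(). The patch stores -1UL as a sentinel pfn and the print side checks for it before converting back. A sketch of the converted mm_page_alloc event built from those hunks, with comments added (the TP_PROTO line is assumed, since the hunk begins at TP_ARGS):

    TRACE_EVENT(mm_page_alloc,

        /* Assumed prototype; the hunk above begins at TP_ARGS. */
        TP_PROTO(struct page *page, unsigned int order,
             gfp_t gfp_flags, int migratetype),

        TP_ARGS(page, order, gfp_flags, migratetype),

        TP_STRUCT__entry(
            __field( unsigned long, pfn )
            __field( unsigned int, order )
            __field( gfp_t, gfp_flags )
            __field( int, migratetype )
        ),

        TP_fast_assign(
            /* A failed allocation passes page == NULL; record the -1UL sentinel. */
            __entry->pfn = page ? page_to_pfn(page) : -1UL;
            __entry->order = order;
            __entry->gfp_flags = gfp_flags;
            __entry->migratetype = migratetype;
        ),

        TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
            /* Recompute the pointer only when a real pfn was stored. */
            __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
            /* Failed allocations keep reporting pfn=0, as the old code did. */
            __entry->pfn != -1UL ? __entry->pfn : 0,
            __entry->order,
            __entry->migratetype,
            show_gfp_flags(__entry->gfp_flags))
    );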