author	Linus Torvalds	2020-12-16 12:57:51 -0800
committer	Linus Torvalds	2020-12-16 12:57:51 -0800
commitac7ac4618cf25e0d5cd8eba83d5f600084b65b9a (patch)
treee5d28907ff72690a0463a2238b96202d751a535c /include/trace
parent48aba79bcf6ea05148dc82ad9c40713960b00396 (diff)
parentfa94ba8a7b22890e6a17b39b9359e114fe18cd59 (diff)
Merge tag 'for-5.11/block-2020-12-14' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "Another series of killing more code than what is being added, again
  thanks to Christoph's relentless cleanups and tech debt tackling.

  This contains:

   - blk-iocost improvements (Baolin Wang)

   - part0 iostat fix (Jeffle Xu)

   - Disable iopoll for split bios (Jeffle Xu)

   - block tracepoint cleanups (Christoph Hellwig)

   - Merging of struct block_device and hd_struct (Christoph Hellwig)

   - Rework/cleanup of how block device sizes are updated (Christoph Hellwig)

   - Simplification of gendisk lookup and removal of block device aliasing (Christoph Hellwig)

   - Block device ioctl cleanups (Christoph Hellwig)

   - Removal of bdget()/blkdev_get() as exported API (Christoph Hellwig)

   - Disk change rework, avoid ->revalidate_disk() (Christoph Hellwig)

   - sbitmap improvements (Pavel Begunkov)

   - Hybrid polling fix (Pavel Begunkov)

   - bvec iteration improvements (Pavel Begunkov)

   - Zone revalidation fixes (Damien Le Moal)

   - blk-throttle limit fix (Yu Kuai)

   - Various little fixes"

* tag 'for-5.11/block-2020-12-14' of git://git.kernel.dk/linux-block: (126 commits)
  blk-mq: fix msec comment from micro to milli seconds
  blk-mq: update arg in comment of blk_mq_map_queue
  blk-mq: add helper allocating tagset->tags
  Revert "block: Fix a lockdep complaint triggered by request queue flushing"
  nvme-loop: use blk_mq_hctx_set_fq_lock_class to set loop's lock class
  blk-mq: add new API of blk_mq_hctx_set_fq_lock_class
  block: disable iopoll for split bio
  block: Improve blk_revalidate_disk_zones() checks
  sbitmap: simplify wrap check
  sbitmap: replace CAS with atomic and
  sbitmap: remove swap_lock
  sbitmap: optimise sbitmap_deferred_clear()
  blk-mq: skip hybrid polling if iopoll doesn't spin
  blk-iocost: Factor out the base vrate change into a separate function
  blk-iocost: Factor out the active iocgs' state check into a separate function
  blk-iocost: Move the usage ratio calculation to the correct place
  blk-iocost: Remove unnecessary advance declaration
  blk-iocost: Fix some typos in comments
  blktrace: fix up a kerneldoc comment
  block: remove the request_queue to argument request based tracepoints
  ...
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/events/block.h	228
1 file changed, 54 insertions(+), 174 deletions(-)
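The tracepoint cleanup in the diff below drops the request_queue argument from the request- and bio-based events, since the queue is reachable from the traced objects themselves. As a caller-side illustration (example_insert() is a hypothetical helper, not part of the patch; trace_block_rq_insert() is the wrapper these macros generate, and struct request keeps its queue at rq->q):

#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Hypothetical caller, sketching the before/after of this series. */
static void example_insert(struct request *rq)
{
	/* Before this series the tracepoint took the queue explicitly:
	 *
	 *	trace_block_rq_insert(rq->q, rq);
	 *
	 * After it, the queue argument is gone; consumers that still
	 * need the queue can follow rq->q inside the probe.
	 */
	trace_block_rq_insert(rq);
}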
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 34d64ca306b1..0d782663a005 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -64,7 +64,6 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
/**
* block_rq_requeue - place block IO request back on a queue
- * @q: queue holding operation
* @rq: block IO operation request
*
* The block operation request @rq is being placed back into queue
@@ -73,9 +72,9 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
*/
TRACE_EVENT(block_rq_requeue,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq),
+ TP_ARGS(rq),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -147,9 +146,9 @@ TRACE_EVENT(block_rq_complete,
DECLARE_EVENT_CLASS(block_rq,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq),
+ TP_ARGS(rq),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -181,7 +180,6 @@ DECLARE_EVENT_CLASS(block_rq,
/**
* block_rq_insert - insert block operation request into queue
- * @q: target queue
* @rq: block IO operation request
*
* Called immediately before block operation request @rq is inserted
@@ -191,14 +189,13 @@ DECLARE_EVENT_CLASS(block_rq,
*/
DEFINE_EVENT(block_rq, block_rq_insert,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq)
+ TP_ARGS(rq)
);
/**
* block_rq_issue - issue pending block IO request operation to device driver
- * @q: queue holding operation
* @rq: block IO operation request
*
* Called when block operation request @rq from queue @q is sent to a
@@ -206,14 +203,13 @@ DEFINE_EVENT(block_rq, block_rq_insert,
*/
DEFINE_EVENT(block_rq, block_rq_issue,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq)
+ TP_ARGS(rq)
);
/**
* block_rq_merge - merge request with another one in the elevator
- * @q: queue holding operation
* @rq: block IO operation request
*
* Called when block operation request @rq from queue @q is merged to another
@@ -221,48 +217,9 @@ DEFINE_EVENT(block_rq, block_rq_issue,
*/
DEFINE_EVENT(block_rq, block_rq_merge,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq)
-);
-
-/**
- * block_bio_bounce - used bounce buffer when processing block operation
- * @q: queue holding the block operation
- * @bio: block operation
- *
- * A bounce buffer was used to handle the block operation @bio in @q.
- * This occurs when hardware limitations prevent a direct transfer of
- * data between the @bio data memory area and the IO device. Use of a
- * bounce buffer requires extra copying of data and decreases
- * performance.
- */
-TRACE_EVENT(block_bio_bounce,
-
- TP_PROTO(struct request_queue *q, struct bio *bio),
-
- TP_ARGS(q, bio),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
-
- TP_fast_assign(
- __entry->dev = bio_dev(bio);
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
-
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+ TP_ARGS(rq)
);
/**
@@ -301,11 +258,11 @@ TRACE_EVENT(block_bio_complete,
__entry->nr_sector, __entry->error)
);
-DECLARE_EVENT_CLASS(block_bio_merge,
+DECLARE_EVENT_CLASS(block_bio,
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
+ TP_PROTO(struct bio *bio),
- TP_ARGS(q, rq, bio),
+ TP_ARGS(bio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -330,133 +287,62 @@ DECLARE_EVENT_CLASS(block_bio_merge,
);
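For context (my sketch, not part of the patch): DECLARE_EVENT_CLASS emits the record layout, assignment and print logic once, and each DEFINE_EVENT stamps out a named tracepoint that shares all of it. That is what lets the single-bio events below collapse onto the one block_bio class instead of carrying their own TP_STRUCT__entry/TP_fast_assign/TP_printk copies. A minimal sketch of the pattern, with hypothetical names (such definitions live inside a trace header like this one):

/* Shared layout and formatting, generated once. */
DECLARE_EVENT_CLASS(example_bio_class,

	TP_PROTO(struct bio *bio),

	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev	)
		__field( sector_t,	sector	)
	),

	TP_fast_assign(
		__entry->dev	= bio_dev(bio);
		__entry->sector	= bio->bi_iter.bi_sector;
	),

	TP_printk("%d,%d %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->sector)
);

/* One short stanza per event; the events below reuse block_bio this way. */
DEFINE_EVENT(example_bio_class, example_bio_event,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);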
/**
+ * block_bio_bounce - used bounce buffer when processing block operation
+ * @bio: block operation
+ *
+ * A bounce buffer was used to handle the block operation @bio in @q.
+ * This occurs when hardware limitations prevent a direct transfer of
+ * data between the @bio data memory area and the IO device. Use of a
+ * bounce buffer requires extra copying of data and decreases
+ * performance.
+ */
+DEFINE_EVENT(block_bio, block_bio_bounce,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
+);
+
+/**
* block_bio_backmerge - merging block operation to the end of an existing operation
- * @q: queue holding operation
- * @rq: request bio is being merged into
* @bio: new block operation to merge
*
- * Merging block request @bio to the end of an existing block request
- * in queue @q.
+ * Merging block request @bio to the end of an existing block request.
*/
-DEFINE_EVENT(block_bio_merge, block_bio_backmerge,
-
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
-
- TP_ARGS(q, rq, bio)
+DEFINE_EVENT(block_bio, block_bio_backmerge,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_bio_frontmerge - merging block operation to the beginning of an existing operation
- * @q: queue holding operation
- * @rq: request bio is being merged into
* @bio: new block operation to merge
*
- * Merging block IO operation @bio to the beginning of an existing block
- * operation in queue @q.
+ * Merging block IO operation @bio to the beginning of an existing block request.
*/
-DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,
-
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
-
- TP_ARGS(q, rq, bio)
+DEFINE_EVENT(block_bio, block_bio_frontmerge,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_bio_queue - putting new block IO operation in queue
- * @q: queue holding operation
* @bio: new block operation
*
* About to place the block IO operation @bio into queue @q.
*/
-TRACE_EVENT(block_bio_queue,
-
- TP_PROTO(struct request_queue *q, struct bio *bio),
-
- TP_ARGS(q, bio),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
-
- TP_fast_assign(
- __entry->dev = bio_dev(bio);
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
-
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
-);
-
-DECLARE_EVENT_CLASS(block_get_rq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
-
- TP_fast_assign(
- __entry->dev = bio ? bio_dev(bio) : 0;
- __entry->sector = bio ? bio->bi_iter.bi_sector : 0;
- __entry->nr_sector = bio ? bio_sectors(bio) : 0;
- blk_fill_rwbs(__entry->rwbs,
- bio ? bio->bi_opf : 0, __entry->nr_sector);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
-
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+DEFINE_EVENT(block_bio, block_bio_queue,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_getrq - get a free request entry in queue for block IO operations
- * @q: queue for operations
* @bio: pending block IO operation (can be %NULL)
- * @rw: low bit indicates a read (%0) or a write (%1)
*
- * A request struct for queue @q has been allocated to handle the
- * block IO operation @bio.
+ * A request struct has been allocated to handle the block IO operation @bio.
*/
-DEFINE_EVENT(block_get_rq, block_getrq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw)
-);
-
-/**
- * block_sleeprq - waiting to get a free request entry in queue for block IO operation
- * @q: queue for operation
- * @bio: pending block IO operation (can be %NULL)
- * @rw: low bit indicates a read (%0) or a write (%1)
- *
- * In the case where a request struct cannot be provided for queue @q
- * the process needs to wait for an request struct to become
- * available. This tracepoint event is generated each time the
- * process goes to sleep waiting for request struct become available.
- */
-DEFINE_EVENT(block_get_rq, block_sleeprq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw)
+DEFINE_EVENT(block_bio, block_getrq,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
@@ -521,21 +407,19 @@ DEFINE_EVENT(block_unplug, block_unplug,
/**
* block_split - split a single bio struct into two bio structs
- * @q: queue containing the bio
* @bio: block operation being split
* @new_sector: The starting sector for the new bio
*
- * The bio request @bio in request queue @q needs to be split into two
- * bio requests. The newly created @bio request starts at
- * @new_sector. This split may be required due to hardware limitation
- * such as operation crossing device boundaries in a RAID system.
+ * The bio request @bio needs to be split into two bio requests. The newly
+ * created @bio request starts at @new_sector. This split may be required due to
+ * hardware limitations such as operation crossing device boundaries in a RAID
+ * system.
*/
TRACE_EVENT(block_split,
- TP_PROTO(struct request_queue *q, struct bio *bio,
- unsigned int new_sector),
+ TP_PROTO(struct bio *bio, unsigned int new_sector),
- TP_ARGS(q, bio, new_sector),
+ TP_ARGS(bio, new_sector),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -562,9 +446,8 @@ TRACE_EVENT(block_split,
/**
* block_bio_remap - map request for a logical device to the raw device
- * @q: queue holding the operation
* @bio: revised operation
- * @dev: device for the operation
+ * @dev: original device for the operation
* @from: original sector for the operation
*
* An operation for a logical device has been mapped to the
@@ -572,10 +455,9 @@ TRACE_EVENT(block_split,
*/
TRACE_EVENT(block_bio_remap,
- TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
- sector_t from),
+ TP_PROTO(struct bio *bio, dev_t dev, sector_t from),
- TP_ARGS(q, bio, dev, from),
+ TP_ARGS(bio, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -605,7 +487,6 @@ TRACE_EVENT(block_bio_remap,
/**
* block_rq_remap - map request for a block operation request
- * @q: queue holding the operation
* @rq: block IO operation request
* @dev: device for the operation
* @from: original sector for the operation
@@ -616,10 +497,9 @@ TRACE_EVENT(block_bio_remap,
*/
TRACE_EVENT(block_rq_remap,
- TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
- sector_t from),
+ TP_PROTO(struct request *rq, dev_t dev, sector_t from),
- TP_ARGS(q, rq, dev, from),
+ TP_ARGS(rq, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
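For completeness, a consumer-side sketch (my illustration, not part of the patch): in-kernel users such as blktrace attach probes through the register_trace_<event>() helpers the tracepoint macros generate. After this series the probes receive just the request or bio, and recover the queue, where still needed, via rq->q. Assuming the event is reachable from the caller (blktrace is built in; a module additionally needs the tracepoint exported):

#include <linux/module.h>
#include <linux/tracepoint.h>
#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Probe matching the new single-argument prototype: the request_queue
 * is no longer passed in, but remains reachable as rq->q. */
static void example_probe(void *ignore, struct request *rq)
{
	struct request_queue *q = rq->q;

	(void)q; /* e.g. inspect queue state alongside the request */
}

static int __init example_init(void)
{
	return register_trace_block_rq_insert(example_probe, NULL);
}

static void __exit example_exit(void)
{
	unregister_trace_block_rq_insert(example_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");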