author    Thomas Gleixner 2019-11-11 23:03:22 +0100
committer Thomas Gleixner 2019-11-16 11:24:02 +0100
commit    060aa16fdb7c5078a4159a76e5dc87d6a493af9b (patch)
tree      63f2ffbc60ce9e1064ca13bc128efeb7c1e7f6c8 /arch/x86/include
parent    577d5cd7e5851d3832066cd0422475fa7db2ee17 (diff)
x86/ioperm: Add bitmap sequence number
Add a globally unique sequence number which is incremented when ioperm()
is changing the I/O bitmap of a task. Store the new sequence number in the
io_bitmap structure and compare it with the sequence number of the I/O
bitmap which was last loaded on a CPU. Only update the bitmap if the
sequence is different.

That should further reduce the overhead of I/O bitmap scheduling when
there are only a few I/O bitmap users on the system.

The 64bit sequence counter is sufficient. A wraparound of the sequence
counter assuming an ioperm() call every nanosecond would require about
584 years of uptime.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
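For illustration, here is a minimal user-space C sketch of the scheme the
message describes. The names io_bitmap_sequence, fake_tss, ioperm_update and
tss_update_io_bitmap are made up for this example and are not the kernel's
actual identifiers; only the io_bitmap fields mirror the patch itself.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IO_BITMAP_LONGS 1024

struct io_bitmap {
	uint64_t sequence;
	unsigned int max;
	unsigned long bitmap[IO_BITMAP_LONGS];
};

/*
 * Globally unique sequence, bumped on every bitmap-changing ioperm().
 * 2^64 ns ~= 1.8e19 ns ~= 584 years, so a u64 never wraps in practice.
 */
static _Atomic uint64_t io_bitmap_sequence;

/* Stand-in for the per-CPU TSS bookkeeping (hypothetical type). */
struct fake_tss {
	uint64_t prev_sequence;
	unsigned long bitmap[IO_BITMAP_LONGS];
};

/* Called when ioperm() modifies a task's bitmap: tag it with a new sequence. */
static void ioperm_update(struct io_bitmap *iobm)
{
	/* ... modify iobm->bitmap / iobm->max here ... */
	iobm->sequence = atomic_fetch_add(&io_bitmap_sequence, 1) + 1;
}

/* Called on context switch: copy only if this CPU holds a stale bitmap. */
static bool tss_update_io_bitmap(struct fake_tss *tss, struct io_bitmap *iobm)
{
	if (tss->prev_sequence == iobm->sequence)
		return false;	/* same bitmap already loaded, skip the copy */

	memcpy(tss->bitmap, iobm->bitmap, sizeof(tss->bitmap));
	tss->prev_sequence = iobm->sequence;
	return true;
}

int main(void)
{
	struct io_bitmap task = { 0 };
	struct fake_tss cpu = { 0 };

	ioperm_update(&task);
	printf("first switch copies:  %d\n", tss_update_io_bitmap(&cpu, &task));
	printf("second switch copies: %d\n", tss_update_io_bitmap(&cpu, &task));
	return 0;
}

Running the sketch, the first "context switch" performs the copy and the
second skips it, which is exactly the saving the commit message claims for
systems with only a few ioperm() users.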
Diffstat (limited to 'arch/x86/include')
 arch/x86/include/asm/io_bitmap.h | 1 +
 arch/x86/include/asm/processor.h | 3 +++
 2 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/io_bitmap.h b/arch/x86/include/asm/io_bitmap.h
index 1a12b9ff5e4e..d63bd5afe671 100644
--- a/arch/x86/include/asm/io_bitmap.h
+++ b/arch/x86/include/asm/io_bitmap.h
@@ -5,6 +5,7 @@
 #include <asm/processor.h>
 
 struct io_bitmap {
+	u64 sequence;
 	/* The maximum number of bytes to copy so all zero bits are covered */
 	unsigned int max;
 	unsigned long bitmap[IO_BITMAP_LONGS];
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c949e0e5cbe6..40bb0f7bca3f 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -361,6 +361,9 @@ struct entry_stack_page {
  * All IO bitmap related data stored in the TSS:
  */
 struct x86_io_bitmap {
+	/* The sequence number of the last active bitmap. */
+	u64 prev_sequence;
+
 	/*
 	 * Store the dirty size of the last io bitmap offender. The next
 	 * one will have to do the cleanup as the switch out to a non io