path: root/include/linux/swiotlb.h
author    Tianyu Lan           2022-07-08 12:15:44 -0400
committer Christoph Hellwig    2022-07-13 13:23:10 +0200
commit    20347fca71a387a3751f7bb270062616ddc5317a (patch)
tree      202597625c17d2211fbd4f65abb65f4b55b0d2b5 /include/linux/swiotlb.h
parent    c51ba246cb172c9e947dc6fb8868a1eaf0b2a913 (diff)
swiotlb: split up the global swiotlb lock
Traditionally swiotlb was not performance critical because it was only used for slow devices. But in some setups, like TDX/SEV confidential guests, all IO has to go through swiotlb. Currently swiotlb only has a single lock. Under high IO load with multiple CPUs this can lead to significant lock contention on the swiotlb lock.

This patch splits the swiotlb bounce buffer pool into individual areas which have their own lock. Each CPU tries to allocate in its own area first. Only if that fails does it search other areas. On freeing, the allocation is freed into the area where the memory was originally allocated from.

The number of areas can be set via the swiotlb kernel parameter and defaults to the number of possible CPUs. If the possible CPU count is not a power of 2, the area count is rounded up to the next power of 2.

This idea is from Andi Kleen's patch (https://github.com/intel/tdx/commit/4529b5784c141782c72ec9bd9a92df2b68cb7d45).

Based-on-idea-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
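The allocation strategy in the commit message lends itself to a short sketch. The following self-contained userspace C mock-up (pthread mutexes standing in for the kernel's per-area spinlocks; names such as tlb_pool, area_try_alloc and the slot-count bookkeeping are illustrative assumptions, not the patch's actual internals) shows the own-area-first search and the free-back-to-origin rule:

/*
 * Sketch of the lock-splitting idea: the pool is divided into nareas
 * areas, each with its own lock. A CPU starts in "its" area and falls
 * back to the others. Illustrative only, not the swiotlb internals.
 */
#include <pthread.h>
#include <stdbool.h>

struct tlb_area {                 /* hypothetical stand-in for io_tlb_area */
	pthread_mutex_t lock;     /* protects this area's slots only */
	unsigned int used;        /* slots currently allocated */
	unsigned int nslabs;      /* slots in this area */
};

struct tlb_pool {
	struct tlb_area *areas;
	unsigned int nareas;      /* power of 2, so "& (nareas - 1)" works */
	unsigned int area_nslabs;
};

/* Try one area; the real code searches slots, here we just count. */
static bool area_try_alloc(struct tlb_area *a)
{
	bool ok = false;

	pthread_mutex_lock(&a->lock);
	if (a->used < a->nslabs) {
		a->used++;
		ok = true;
	}
	pthread_mutex_unlock(&a->lock);
	return ok;
}

/* Start in the CPU's own area, then scan the rest on failure. */
static int pool_alloc(struct tlb_pool *p, unsigned int cpu)
{
	unsigned int start = cpu & (p->nareas - 1);
	unsigned int i;

	for (i = 0; i < p->nareas; i++) {
		unsigned int idx = (start + i) & (p->nareas - 1);

		if (area_try_alloc(&p->areas[idx]))
			return idx;   /* caller remembers the origin area */
	}
	return -1;
}

/* Free back into the area the allocation originally came from. */
static void pool_free(struct tlb_pool *p, int area_idx)
{
	struct tlb_area *a = &p->areas[area_idx];

	pthread_mutex_lock(&a->lock);
	a->used--;
	pthread_mutex_unlock(&a->lock);
}

int main(void)
{
	struct tlb_area areas[4];
	struct tlb_pool pool = { areas, 4, 8 };
	unsigned int i;
	int idx;

	for (i = 0; i < pool.nareas; i++) {
		pthread_mutex_init(&areas[i].lock, NULL);
		areas[i].used = 0;
		areas[i].nslabs = pool.area_nslabs;
	}

	idx = pool_alloc(&pool, 5);   /* CPU 5 tries area 5 & 3 = 1 first */
	if (idx >= 0)
		pool_free(&pool, idx);
	return 0;
}

Because contention is now per-area rather than global, CPUs under high IO load mostly take disjoint locks and only collide when their own area is exhausted.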
Diffstat (limited to 'include/linux/swiotlb.h')
-rw-r--r-- include/linux/swiotlb.h | 5 +++++
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index bdc58a0e20b1..f65ff1930120 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -88,6 +88,8 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
* @late_alloc: %true if allocated using the page allocator
* @force_bounce: %true if swiotlb bouncing is forced
* @for_alloc: %true if the pool is used for memory allocation
+ * @nareas: Number of areas in the pool.
+ * @area_nslabs: Number of slots in each area.
*/
struct io_tlb_mem {
phys_addr_t start;
@@ -101,6 +103,9 @@ struct io_tlb_mem {
bool late_alloc;
bool force_bounce;
bool for_alloc;
+ unsigned int nareas;
+ unsigned int area_nslabs;
+ struct io_tlb_area *areas;
struct io_tlb_slot {
phys_addr_t orig_addr;
size_t alloc_size;
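The hunk declares struct io_tlb_area without defining it; in the tree the definition is private to kernel/dma/swiotlb.c. As a hedged sketch consistent with the commit description (a per-area lock plus per-area usage bookkeeping), each area plausibly carries something like:

/*
 * Sketch only: struct io_tlb_area is private to kernel/dma/swiotlb.c,
 * so these field names are assumptions based on the commit description,
 * not a quotation of the patch.
 */
struct io_tlb_area {
	unsigned long used;	/* slots currently allocated in this area */
	unsigned int index;	/* next slot index to try within the area */
	spinlock_t lock;	/* protects only this area's slots */
};

Keeping the lock inside the area, rather than in io_tlb_mem, is what lets allocations on different CPUs proceed without serializing on a single global lock.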