author     Mike Snitzer <snitzer@redhat.com>  2018-05-22 18:26:20 -0400
committer  Mike Snitzer <snitzer@redhat.com>  2018-06-08 11:53:14 -0400
commit     72d711c8768805b5f8cf2d23c575dfd188993e12 (patch)
tree       1a442b3c9c5d7eb4fb61c2bdd10c19386ae19d28
parent     b2b04e7e2d3bffd301d1769700ba013f58ca01b7 (diff)
dm: adjust structure members to improve alignment
Eliminate most holes in DM data structures that were modified by
commit 6f1c819c21 ("dm: convert to bioset_init()/mempool_init()").
Also prevent structure members from unnecessarily spanning cache lines.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
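To make the "holes" mentioned above concrete, here is a minimal, self-contained C sketch of the problem the patch addresses. The names struct poolish, bad_layout and good_layout are hypothetical stand-ins, not the DM structures touched by this patch: a 4-byte member followed by a pointer-aligned embedded structure (such as an embedded mempool_t after the mempool_init() conversion) leaves a 4-byte alignment hole of the kind pahole reports; moving another small member into that slot removes the hole and shrinks the structure.

/* build: cc -std=c11 -Wall hole_demo.c */
#include <stdio.h>

/* Illustrative stand-in for an embedded, pointer-aligned member. */
struct poolish {
	void **elements;
	void *pool_data;
};

struct bad_layout {
	int lock;            /* 4 bytes                                    */
	                     /* 4-byte hole: 'pool' needs 8-byte alignment */
	struct poolish pool; /* 16 bytes                                   */
	int nr_items;        /* 4 bytes                                    */
	                     /* 4 bytes of tail padding                    */
};                           /* 32 bytes on a typical LP64 ABI             */

struct good_layout {
	int lock;            /* 4 bytes                        */
	int nr_items;        /* 4 bytes: fills the former hole */
	struct poolish pool; /* 16 bytes                       */
};                           /* 24 bytes on a typical LP64 ABI */

int main(void)
{
	printf("bad_layout:  %zu bytes\n", sizeof(struct bad_layout));
	printf("good_layout: %zu bytes\n", sizeof(struct good_layout));
	return 0;
}

On a typical LP64 ABI this prints 32 for bad_layout and 24 for good_layout even though both hold the same members, which is the same effect the reordering below has on the DM structures.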
-rw-r--r--  drivers/md/dm-bio-prison-v1.c  |  2
-rw-r--r--  drivers/md/dm-bio-prison-v2.c  |  2
-rw-r--r--  drivers/md/dm-cache-target.c   | 61
-rw-r--r--  drivers/md/dm-core.h           | 38
-rw-r--r--  drivers/md/dm-crypt.c          | 26
-rw-r--r--  drivers/md/dm-kcopyd.c         |  3
-rw-r--r--  drivers/md/dm-region-hash.c    | 13
-rw-r--r--  drivers/md/dm-thin.c           |  5
-rw-r--r--  drivers/md/dm-zoned-target.c   |  2
9 files changed, 79 insertions, 73 deletions
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index e794e3662fdd..b5389890bbc3 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -19,8 +19,8 @@
struct dm_bio_prison {
spinlock_t lock;
- mempool_t cell_pool;
struct rb_root cells;
+ mempool_t cell_pool;
};
static struct kmem_cache *_cell_cache;
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index f866bc97b032..b092cdc8e1ae 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -21,8 +21,8 @@ struct dm_bio_prison_v2 {
struct workqueue_struct *wq;
spinlock_t lock;
- mempool_t cell_pool;
struct rb_root cells;
+ mempool_t cell_pool;
};
static struct kmem_cache *_cell_cache;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 001c71248246..ce14a3d1f609 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -371,7 +371,13 @@ struct cache_stats {
struct cache {
struct dm_target *ti;
- struct dm_target_callbacks callbacks;
+ spinlock_t lock;
+
+ /*
+ * Fields for converting from sectors to blocks.
+ */
+ int sectors_per_block_shift;
+ sector_t sectors_per_block;
struct dm_cache_metadata *cmd;
@@ -402,13 +408,11 @@ struct cache {
dm_cblock_t cache_size;
/*
- * Fields for converting from sectors to blocks.
+ * Invalidation fields.
*/
- sector_t sectors_per_block;
- int sectors_per_block_shift;
+ spinlock_t invalidation_lock;
+ struct list_head invalidation_requests;
- spinlock_t lock;
- struct bio_list deferred_bios;
sector_t migration_threshold;
wait_queue_head_t migration_wait;
atomic_t nr_allocated_migrations;
@@ -419,13 +423,11 @@ struct cache {
*/
atomic_t nr_io_migrations;
+ struct bio_list deferred_bios;
+
struct rw_semaphore quiesce_lock;
- /*
- * cache_size entries, dirty if set
- */
- atomic_t nr_dirty;
- unsigned long *dirty_bitset;
+ struct dm_target_callbacks callbacks;
/*
* origin_blocks entries, discarded if set.
@@ -442,17 +444,27 @@ struct cache {
const char **ctr_args;
struct dm_kcopyd_client *copier;
- struct workqueue_struct *wq;
struct work_struct deferred_bio_worker;
struct work_struct migration_worker;
+ struct workqueue_struct *wq;
struct delayed_work waker;
struct dm_bio_prison_v2 *prison;
- struct bio_set bs;
- mempool_t migration_pool;
+ /*
+ * cache_size entries, dirty if set
+ */
+ unsigned long *dirty_bitset;
+ atomic_t nr_dirty;
- struct dm_cache_policy *policy;
unsigned policy_nr_args;
+ struct dm_cache_policy *policy;
+
+ /*
+ * Cache features such as write-through.
+ */
+ struct cache_features features;
+
+ struct cache_stats stats;
bool need_tick_bio:1;
bool sized:1;
@@ -461,25 +473,16 @@ struct cache {
bool loaded_mappings:1;
bool loaded_discards:1;
- /*
- * Cache features such as write-through.
- */
- struct cache_features features;
-
- struct cache_stats stats;
+ struct rw_semaphore background_work_lock;
- /*
- * Invalidation fields.
- */
- spinlock_t invalidation_lock;
- struct list_head invalidation_requests;
+ struct batcher committer;
+ struct work_struct commit_ws;
struct io_tracker tracker;
- struct work_struct commit_ws;
- struct batcher committer;
+ mempool_t migration_pool;
- struct rw_semaphore background_work_lock;
+ struct bio_set bs;
};
struct per_bio_data {
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index f21c5d21bf1b..7d480c930eaf 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -31,6 +31,9 @@ struct dm_kobject_holder {
struct mapped_device {
struct mutex suspend_lock;
+ struct mutex table_devices_lock;
+ struct list_head table_devices;
+
/*
* The current mapping (struct dm_table *).
* Use dm_get_live_table{_fast} or take suspend_lock for
@@ -38,17 +41,14 @@ struct mapped_device {
*/
void __rcu *map;
- struct list_head table_devices;
- struct mutex table_devices_lock;
-
unsigned long flags;
- struct request_queue *queue;
- int numa_node_id;
-
- enum dm_queue_mode type;
/* Protect queue and type against concurrent access. */
struct mutex type_lock;
+ enum dm_queue_mode type;
+
+ int numa_node_id;
+ struct request_queue *queue;
atomic_t holders;
atomic_t open_count;
@@ -56,21 +56,21 @@ struct mapped_device {
struct dm_target *immutable_target;
struct target_type *immutable_target_type;
+ char name[16];
struct gendisk *disk;
struct dax_device *dax_dev;
- char name[16];
-
- void *interface_ptr;
/*
* A list of ios that arrived while we were suspended.
*/
- atomic_t pending[2];
- wait_queue_head_t wait;
struct work_struct work;
+ wait_queue_head_t wait;
+ atomic_t pending[2];
spinlock_t deferred_lock;
struct bio_list deferred;
+ void *interface_ptr;
+
/*
* Event handling.
*/
@@ -84,17 +84,17 @@ struct mapped_device {
unsigned internal_suspend_count;
/*
- * Processing queue (flush)
- */
- struct workqueue_struct *wq;
-
- /*
* io objects are allocated from here.
*/
struct bio_set io_bs;
struct bio_set bs;
/*
+ * Processing queue (flush)
+ */
+ struct workqueue_struct *wq;
+
+ /*
* freeze/thaw support require holding onto a super block
*/
struct super_block *frozen_sb;
@@ -102,11 +102,11 @@ struct mapped_device {
/* forced geometry settings */
struct hd_geometry geometry;
- struct block_device *bdev;
-
/* kobject and completion */
struct dm_kobject_holder kobj_holder;
+ struct block_device *bdev;
+
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index da02f4d8e4b9..4939fbc34ff2 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -139,25 +139,13 @@ struct crypt_config {
struct dm_dev *dev;
sector_t start;
- /*
- * pool for per bio private data, crypto requests,
- * encryption requeusts/buffer pages and integrity tags
- */
- mempool_t req_pool;
- mempool_t page_pool;
- mempool_t tag_pool;
- unsigned tag_pool_max_sectors;
-
struct percpu_counter n_allocated_pages;
- struct bio_set bs;
- struct mutex bio_alloc_lock;
-
struct workqueue_struct *io_queue;
struct workqueue_struct *crypt_queue;
- struct task_struct *write_thread;
wait_queue_head_t write_thread_wait;
+ struct task_struct *write_thread;
struct rb_root write_tree;
char *cipher;
@@ -213,6 +201,18 @@ struct crypt_config {
unsigned int integrity_iv_size;
unsigned int on_disk_tag_size;
+ /*
+ * pool for per bio private data, crypto requests,
+ * encryption requeusts/buffer pages and integrity tags
+ */
+ unsigned tag_pool_max_sectors;
+ mempool_t tag_pool;
+ mempool_t req_pool;
+ mempool_t page_pool;
+
+ struct bio_set bs;
+ struct mutex bio_alloc_lock;
+
u8 *authenc_key; /* space for keys in authenc() format (if used) */
u8 key[0];
};
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index ce7efc7434be..3c7547a3c371 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -45,7 +45,6 @@ struct dm_kcopyd_client {
struct dm_io_client *io_client;
wait_queue_head_t destroyq;
- atomic_t nr_jobs;
mempool_t job_pool;
@@ -54,6 +53,8 @@ struct dm_kcopyd_client {
struct dm_kcopyd_throttle *throttle;
+ atomic_t nr_jobs;
+
/*
* We maintain three lists of jobs:
*
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index abf3521b80a8..c832ec398f02 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -63,27 +63,28 @@ struct dm_region_hash {
/* hash table */
rwlock_t hash_lock;
- mempool_t region_pool;
unsigned mask;
unsigned nr_buckets;
unsigned prime;
unsigned shift;
struct list_head *buckets;
+ /*
+ * If there was a flush failure no regions can be marked clean.
+ */
+ int flush_failure;
+
unsigned max_recovery; /* Max # of regions to recover in parallel */
spinlock_t region_lock;
atomic_t recovery_in_flight;
- struct semaphore recovery_count;
struct list_head clean_regions;
struct list_head quiesced_regions;
struct list_head recovered_regions;
struct list_head failed_recovered_regions;
+ struct semaphore recovery_count;
- /*
- * If there was a flush failure no regions can be marked clean.
- */
- int flush_failure;
+ mempool_t region_pool;
void *context;
sector_t target_begin;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 5772756c63c1..6cf9c9364103 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -240,9 +240,9 @@ struct pool {
struct dm_bio_prison *prison;
struct dm_kcopyd_client *copier;
+ struct work_struct worker;
struct workqueue_struct *wq;
struct throttle throttle;
- struct work_struct worker;
struct delayed_work waker;
struct delayed_work no_space_timeout;
@@ -260,7 +260,6 @@ struct pool {
struct dm_deferred_set *all_io_ds;
struct dm_thin_new_mapping *next_mapping;
- mempool_t mapping_pool;
process_bio_fn process_bio;
process_bio_fn process_discard;
@@ -273,6 +272,8 @@ struct pool {
process_mapping_fn process_prepared_discard_pt2;
struct dm_bio_prison_cell **cell_sort_array;
+
+ mempool_t mapping_pool;
};
static enum pool_mode get_pool_mode(struct pool *pool);
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 30602d15ad9a..3c0e45f4dcf5 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -52,9 +52,9 @@ struct dmz_target {
struct dmz_reclaim *reclaim;
/* For chunk work */
- struct mutex chunk_lock;
struct radix_tree_root chunk_rxtree;
struct workqueue_struct *chunk_wq;
+ struct mutex chunk_lock;
/* For cloned BIOs to zones */
struct bio_set bio_set;