diff options
author     Dennis Zhou (Facebook)  2017-07-24 19:02:14 -0400
committer  Tejun Heo               2017-07-26 17:41:05 -0400
commit     13f966373f9296c0da2fb2764654cce520b3a6b4 (patch)
tree       fe1bee3a86577ecf25907f3586ae36cb338e0dcd /mm
parent     86b442fbce74d6cd0805410ef228776cbd0338d7 (diff)
percpu: skip chunks if the alloc does not fit in the contig hint
This patch adds chunk->contig_bits_start to keep track of the contig
hint's offset and the check to skip the chunk if it does not fit. If
the chunk's contig hint starting offset cannot satisfy an allocation,
the allocator assumes there is enough memory pressure in this chunk to
either use a different chunk or create a new one. This accepts a less
tight packing for a smoother latency curve.
Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'mm')
 mm/percpu-internal.h |  2 ++
 mm/percpu.c          | 18 ++++++++++++++--
 2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
index e60e04966d96..7065faf74b46 100644
--- a/mm/percpu-internal.h
+++ b/mm/percpu-internal.h
@@ -29,6 +29,8 @@ struct pcpu_chunk {
 	struct list_head	list;		/* linked to pcpu_slot lists */
 	int			free_bytes;	/* free bytes in the chunk */
 	int			contig_bits;	/* max contiguous size hint */
+	int			contig_bits_start; /* contig_bits starting
+						      offset */
 	void			*base_addr;	/* base address of this chunk */
 	unsigned long		*alloc_map;	/* allocation map */
diff --git a/mm/percpu.c b/mm/percpu.c
index 83abb190ca5a..734745a0c9b6 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -400,12 +400,14 @@ static inline int pcpu_cnt_pop_pages(struct pcpu_chunk *chunk, int bit_off,
  * @bit_off: chunk offset
  * @bits: size of free area
  *
- * This updates the chunk's contig hint given a free area.
+ * This updates the chunk's contig hint and starting offset given a free area.
  */
 static void pcpu_chunk_update(struct pcpu_chunk *chunk, int bit_off, int bits)
 {
-	if (bits > chunk->contig_bits)
+	if (bits > chunk->contig_bits) {
+		chunk->contig_bits_start = bit_off;
 		chunk->contig_bits = bits;
+	}
 }
 
 /**
@@ -416,6 +418,7 @@ static void pcpu_chunk_update(struct pcpu_chunk *chunk, int bit_off, int bits)
  *
  * Updates:
  *	chunk->contig_bits
+ *	chunk->contig_bits_start
  *	nr_empty_pop_pages
  */
 static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk)
@@ -646,6 +649,17 @@ static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
 	int bit_off, bits;
 	int re; /* region end */
 
+	/*
+	 * Check to see if the allocation can fit in the chunk's contig hint.
+	 * This is an optimization to prevent scanning by assuming if it
+	 * cannot fit in the global hint, there is memory pressure and creating
+	 * a new chunk would happen soon.
+	 */
+	bit_off = ALIGN(chunk->contig_bits_start, align) -
+		  chunk->contig_bits_start;
+	if (bit_off + alloc_bits > chunk->contig_bits)
+		return -1;
+
 	pcpu_for_each_unpop_region(chunk->alloc_map, bit_off, re,
 				   chunk->first_bit,
 				   pcpu_chunk_map_bits(chunk)) {