author     Maarten Lankhorst   2021-07-27 12:48:17 +0200
committer  Maarten Lankhorst   2021-07-27 12:48:17 +0200
commit     ca31fef11dc83e672415d5925a134749761329bd (patch)
tree       8eb6a489e2d6dd117300f40ed8fc945a06bb6eee /mm/percpu-vm.c
parent     ba6cd766e0bf933611dc66fcb86f72ac80a446bc (diff)
parent     15d27b15de965043d6f8e23bc7f34386fcd1a772 (diff)
Backmerge remote-tracking branch 'drm/drm-next' into drm-misc-next
Required bump from v5.13-rc3 to v5.14-rc3, and to pick up sysfb compilation fixes.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Diffstat (limited to 'mm/percpu-vm.c')
-rw-r--r--  mm/percpu-vm.c  40
1 file changed, 35 insertions(+), 5 deletions(-)
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 8d3844bc0c7c..2054c9213c43 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -303,6 +303,9 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
  * For each cpu, depopulate and unmap pages [@page_start,@page_end)
  * from @chunk.
  *
 + * Caller is required to call pcpu_post_unmap_tlb_flush() if not returning the
 + * region back to vmalloc() which will lazily flush the tlb.
 + *
  * CONTEXT:
  * pcpu_alloc_mutex.
  */
@@ -324,18 +327,15 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
 	pcpu_unmap_pages(chunk, pages, page_start, page_end);
-	/* no need to flush tlb, vmalloc will handle it lazily */
-
 	pcpu_free_pages(chunk, pages, page_start, page_end);
 }
-static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
-					    gfp_t gfp)
+static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
 {
 	struct pcpu_chunk *chunk;
 	struct vm_struct **vms;
-	chunk = pcpu_alloc_chunk(type, gfp);
+	chunk = pcpu_alloc_chunk(gfp);
 	if (!chunk)
 		return NULL;
@@ -378,3 +378,33 @@ static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
 	/* no extra restriction */
 	return 0;
 }
+
+/**
+ * pcpu_should_reclaim_chunk - determine if a chunk should go into reclaim
+ * @chunk: chunk of interest
+ *
+ * This is the entry point for percpu reclaim. If a chunk qualifies, it is then
+ * isolated and managed in separate lists at the back of pcpu_slot: sidelined
+ * and to_depopulate respectively. The to_depopulate list holds chunks slated
+ * for depopulation. They no longer contribute to pcpu_nr_empty_pop_pages once
+ * they are on this list. Once depopulated, they are moved onto the sidelined
+ * list which enables them to be pulled back in for allocation if no other chunk
+ * can suffice the allocation.
+ */
+static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk)
+{
+	/* do not reclaim either the first chunk or reserved chunk */
+	if (chunk == pcpu_first_chunk || chunk == pcpu_reserved_chunk)
+		return false;
+
+	/*
+	 * If it is isolated, it may be on the sidelined list so move it back to
+	 * the to_depopulate list. If we hit at least 1/4 pages empty pages AND
+	 * there is no system-wide shortage of empty pages aside from this
+	 * chunk, move it to the to_depopulate list.
+	 */
+	return ((chunk->isolated && chunk->nr_empty_pop_pages) ||
+		(pcpu_nr_empty_pop_pages >
+		 (PCPU_EMPTY_POP_PAGES_HIGH + chunk->nr_empty_pop_pages) &&
+		 chunk->nr_empty_pop_pages >= chunk->nr_pages / 4));
+}
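
The comment added to pcpu_depopulate_chunk() above moves the TLB flush into the caller's hands: the pages are unmapped but not flushed, so a caller that keeps the chunk (instead of returning the region to vmalloc()) must flush explicitly. Below is a minimal caller-side sketch of that contract. The helper name reclaim_pages_sketch() is made up, and the (chunk, page_start, page_end) parameter lists of pcpu_depopulate_chunk() and pcpu_post_unmap_tlb_flush() are assumed, since they are not fully shown in this diff; read it as an illustration rather than the kernel's actual reclaim path.

/*
 * Illustrative sketch only: the parameter lists below are assumed and the
 * surrounding reclaim loop is omitted.
 */
static void reclaim_pages_sketch(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	/* unmap and free the backing pages; no TLB flush happens here */
	pcpu_depopulate_chunk(chunk, page_start, page_end);

	/*
	 * The chunk is kept for reuse rather than handed back to vmalloc(),
	 * so the stale translations must be flushed explicitly, as the new
	 * comment requires.
	 */
	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
}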
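
The return expression in pcpu_should_reclaim_chunk() packs the whole reclaim heuristic into one condition. The standalone sketch below restates that logic with simplified stand-in types and a stand-in threshold so the two cases are easier to read; struct chunk_stats, EMPTY_POP_PAGES_HIGH and should_reclaim() are invented for illustration and do not exist in the kernel.

#include <stdbool.h>

#define EMPTY_POP_PAGES_HIGH	4	/* stand-in for PCPU_EMPTY_POP_PAGES_HIGH */

/* simplified stand-in for the fields of struct pcpu_chunk used above */
struct chunk_stats {
	bool isolated;			/* already isolated (possibly sidelined)? */
	int nr_empty_pop_pages;		/* empty but populated pages in this chunk */
	int nr_pages;			/* total pages in this chunk */
};

/* mirrors the decision logic of pcpu_should_reclaim_chunk() */
static bool should_reclaim(const struct chunk_stats *c, int global_empty_pop_pages)
{
	/* an isolated chunk that still has empty pages goes back to to_depopulate */
	if (c->isolated && c->nr_empty_pop_pages)
		return true;

	/*
	 * Otherwise reclaim only when the rest of the system keeps enough
	 * empty pages without this chunk AND at least 1/4 of this chunk's
	 * pages are empty.
	 */
	return global_empty_pop_pages >
			EMPTY_POP_PAGES_HIGH + c->nr_empty_pop_pages &&
	       c->nr_empty_pop_pages >= c->nr_pages / 4;
}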