about summary refs log tree commit diff
diff options
context:
space:
mode:
authorEdward Wu2022-06-16 07:34:20 +0800
committerPaul Kocialkowski2024-01-31 15:35:55 +0100
commite9237a20fea5f164481fcab3ff286f4e1da74550 (patch)
treec8bc6cb1539fc730784da87ac491eaaf7312b8ff
parent360b6d286bf6cb194130f0045af14504af783887 (diff)
SPL-14059 mm: cma: sync everything after EBUSY
Since file-backed memory in the CMA area can be subject to long-term pinning, cma_alloc() may fail with -EBUSY. From Minchan Kim's debug commit 151e084af494 ("mm: page_alloc: dump migrate-failed pages only at -EBUSY") we know the pinned pages come from buffer_head, the ext4 journal, and FS metadata. Sync everything after an -EBUSY failure, which can unpin most file-system pages and raise the success rate of the next attempt. Link: https://lkml.kernel.org/r/20220615021504.23358-1-edwardwu@realtek.com Signed-off-by: Edward Wu <edwardwu@realtek.com>
-rw-r--r--mm/cma.c25
1 file changed, 25 insertions, 0 deletions
diff --git a/mm/cma.c b/mm/cma.c
index 30b6ca30009b..3ebf66d6d597 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -31,6 +31,7 @@
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
+#include <linux/syscalls.h>
#include <trace/events/cma.h>
#include "cma.h"
@@ -411,6 +412,24 @@ static void cma_debug_show_areas(struct cma *cma)
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
+static void cma_sync_work(struct work_struct *work)
+{
+ ksys_sync();
+ kfree(work);
+ pr_debug("%s(): EBUSY Sync complete\n", __func__);
+}
+
+static void cma_ebusy_sync_pinned_pages(void)
+{
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, cma_sync_work);
+ schedule_work(work);
+ }
+}
+
/**
* cma_alloc() - allocate pages from contiguous area
* @cma: Contiguous memory region for which the allocation is performed.
@@ -431,6 +450,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
unsigned long i;
struct page *page = NULL;
int ret = -ENOMEM;
+ bool sys_synchronized = false;
if (!cma || !cma->count || !cma->bitmap)
goto out;
@@ -482,6 +502,11 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
if (ret != -EBUSY)
break;
+ if (!sys_synchronized) {
+ sys_synchronized = true;
+ cma_ebusy_sync_pinned_pages();
+ }
+
pr_debug("%s(): memory range at %p is busy, retrying\n",
__func__, pfn_to_page(pfn));