author     Ley Foon Tan   2015-02-09 18:11:29 +0800
committer  Ley Foon Tan   2015-02-09 18:11:29 +0800
commit     96f3a5cc33baede169e0d330119090789e97e86b (patch)
tree       a3b974676527451ef7542ca1029b92d136ceaee7 /arch/nios2/mm
parent     ad7ef26d433bbf21a915652d96ad07048a0a4e26 (diff)
nios2: Port OOM changes to do_page_fault()
Commit d065bd810b6d ("mm: retry page fault when blocking on disk transfer") and commit 37b23e0525d3 ("x86,mm: make pagefault killable") made the page fault handler retryable as well as killable. Port these changes to the nios2 do_page_fault(); they reduce the mmap_sem hold time, which is crucial during OOM killer invocation.

Signed-off-by: Ley Foon Tan <lftan@altera.com>
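For readers who want the retry protocol in one place, here is a minimal, self-contained sketch of the control flow those commits introduce, written as plain userspace C. The flag values, mock_handle_mm_fault() and the stubbed fatal_signal_pending() are illustrative stand-ins, not the kernel definitions; only the ordering of the checks mirrors the patched handler in the diff below.

/* Minimal userspace model of the retryable/killable fault pattern.
 * All names, values and stubs are illustrative stand-ins, not kernel code. */
#include <stdio.h>
#include <stdbool.h>

#define FAULT_FLAG_ALLOW_RETRY 0x01  /* mock: first attempt, handler may drop the lock and ask us to retry */
#define FAULT_FLAG_KILLABLE    0x02  /* mock: waits may be interrupted by a fatal signal */
#define FAULT_FLAG_TRIED       0x04  /* mock: second attempt, no further retries */
#define VM_FAULT_RETRY         0x10  /* mock: handler released the lock, caller decides whether to retry */

static bool fatal_signal_pending(void) { return false; }  /* stub */

/* Stub for handle_mm_fault(): report RETRY on the first pass, then succeed. */
static int mock_handle_mm_fault(unsigned int flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) ? VM_FAULT_RETRY : 0;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

retry:
	/* In the kernel, mmap_sem is taken for read here. */
	fault = mock_handle_mm_fault(flags);

	/* Killable: if the handler bailed out because a fatal signal arrived
	 * while it was waiting, give up instead of retrying. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending())
		return 0;

	if (fault & VM_FAULT_RETRY) {
		/* Retryable, but only once: clear ALLOW_RETRY so a second
		 * VM_FAULT_RETRY cannot starve the task, and mark the attempt
		 * as TRIED.  The handler already released the lock before
		 * returning VM_FAULT_RETRY, so there is nothing to unlock. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	printf("fault resolved, flags=%#x\n", flags);
	/* In the kernel, mmap_sem is released for read here. */
	return 0;
}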
Diffstat (limited to 'arch/nios2/mm')
-rw-r--r--  arch/nios2/mm/fault.c  37
1 file changed, 32 insertions(+), 5 deletions(-)
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index d194c0427b26..0d231adfe576 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -47,7 +47,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
struct mm_struct *mm = tsk->mm;
int code = SEGV_MAPERR;
int fault;
- unsigned int flags = 0;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
cause >>= 2;
@@ -86,6 +86,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
if (!down_read_trylock(&mm->mmap_sem)) {
if (!user_mode(regs) && !search_exception_tables(regs->ea))
goto bad_area_nosemaphore;
+retry:
down_read(&mm->mmap_sem);
}
@@ -132,6 +133,10 @@ survive:
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -141,10 +146,32 @@ survive:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
+
+ /*
+ * Major/minor page fault accounting is only done on the
+ * initial attempt. If we go through a retry, it is extremely
+ * likely that the page will be found in page cache at that point.
+ */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
up_read(&mm->mmap_sem);
return;