author		Aneesh Kumar K.V	2017-02-24 14:59:19 -0800
committer	Linus Torvalds		2017-02-24 17:46:56 -0800
commit		595cd8f256d24face93b2722927ec9c980419c26 (patch)
tree		4884ad51a34aa931eef71932ed7546ab390f51ef /mm/ksm.c
parent		288bc54949fc2625a4fd811a188fb200cc498946 (diff)
mm/ksm: handle protnone saved writes when making page write protect
Without this, KSM will consider the page write-protected, but a NUMA fault
can later mark the page writable.  This can result in memory corruption.

Link: http://lkml.kernel.org/r/1487498625-10891-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
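The condition the patch adds can be read as a single predicate: a pte still
counts as writable for KSM's purposes if its hardware write bit is set, if it
is dirty, or if it is a PROT_NONE (NUMA hinting) pte whose write bit was
stashed as a saved write. A minimal sketch restating that predicate follows;
pte_effectively_writable() is a hypothetical name for illustration only, not a
helper introduced by this patch, and it relies on the usual pgtable accessors
(pte_write, pte_dirty, pte_protnone, pte_savedwrite):

	/*
	 * Illustrative sketch only; this helper does not exist in the
	 * patch.  It restates the check used in write_protect_page()
	 * after this change.
	 */
	static bool pte_effectively_writable(pte_t pte)
	{
		/* writable, dirtied, or protnone with a saved write bit */
		return pte_write(pte) || pte_dirty(pte) ||
		       (pte_protnone(pte) && pte_savedwrite(pte));
	}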
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 8960f6ecbc12..cf211c01ceac 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -880,7 +880,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
 		goto out_unlock;
 
-	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte)) {
+	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
+	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
 		pte_t entry;
 
 		swapped = PageSwapCache(page);
@@ -905,7 +906,11 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		}
 		if (pte_dirty(entry))
 			set_page_dirty(page);
-		entry = pte_mkclean(pte_wrprotect(entry));
+
+		if (pte_protnone(entry))
+			entry = pte_mkclean(pte_clear_savedwrite(entry));
+		else
+			entry = pte_mkclean(pte_wrprotect(entry));
 		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
 	}
 	*orig_pte = *pvmw.pte;
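On architectures without a dedicated saved-write bit, the savedwrite helpers
(introduced for autonuma by the parent commit, 288bc54949fc) are expected to
fall back to the ordinary write-bit accessors, in which case the new branch
behaves the same as the old pte_wrprotect() path. A rough sketch of such a
fallback, assuming the generic mapping onto pte_write()/pte_wrprotect() and
not a verbatim copy of the kernel headers:

	/*
	 * Sketch of a generic fallback: treat "saved write" as the ordinary
	 * write bit when the architecture has no separate place to stash it.
	 */
	#ifndef pte_savedwrite
	#define pte_savedwrite(pte)		pte_write(pte)
	#define pte_clear_savedwrite(pte)	pte_wrprotect(pte)
	#endif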