From 1d78a62cb3bb2bd95d00149daaa144f1fe0a77df Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 31 Jan 2018 16:17:55 -0800
Subject: arm64: provide pmdp_establish() helper

We need an atomic way to set up a pmd page table entry, avoiding races
with the CPU setting the dirty/accessed bits.  This is required to
implement a pmdp_invalidate() that does not lose these bits.

Link: http://lkml.kernel.org/r/20171213105756.69879-5-kirill.shutemov@linux.intel.com
Signed-off-by: Catalin Marinas
Signed-off-by: Kirill A. Shutemov
Cc: Vlastimil Babka
Cc: Andrea Arcangeli
Cc: Michal Hocko
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm64/include/asm/pgtable.h | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 89167c43ebb5..094374c82db0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -706,6 +706,13 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 {
 	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
 }
+
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
+}
 #endif
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
--
cgit v1.2.3
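
The point of the atomic exchange is that any dirty/accessed bits the CPU
sets in *pmdp up to the moment of the swap are carried in the returned old
entry instead of being overwritten.  Below is a minimal sketch, assuming the
existing generic helpers pmd_mknotpresent(), flush_pmd_tlb_range() and
HPAGE_PMD_SIZE, of how a pmdp_invalidate() built on pmdp_establish() could
look; the function name is hypothetical and the block is illustrative only,
not part of this patch.

#include <linux/mm.h>
#include <asm/pgtable.h>

/*
 * Sketch only (not part of the patch): invalidate a huge pmd without
 * losing dirty/accessed bits.  pmdp_establish() atomically swaps in a
 * not-present copy of the entry, so hardware updates that raced with us
 * are captured in the returned old value.
 */
static pmd_t example_pmdp_invalidate(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	/* Atomically replace *pmdp with a not-present copy of itself. */
	pmd_t old = pmdp_establish(vma, address, pmdp,
				   pmd_mknotpresent(*pmdp));

	/* Drop any stale huge-page TLB entries for this range. */
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}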