author     Heiko Carstens    2023-04-14 14:30:46 +0200
committer  Vasily Gorbik     2023-04-20 11:36:29 +0200
commit     34e4c79f3ba9e3d7de56be8ef1a514950915e0ee (patch)
tree       b437fca5e2a275d45051454e75f9825d99d7c4c3
parent     7608f70adcb1ea6957d7b9e1d5bd53584178fbbc (diff)
s390/mm: use VM_FLUSH_RESET_PERMS in module_alloc()
Make use of the set_direct_map() calls for module allocations.
In particular:

- All changes to read-only permissions in kernel VA mappings are also
  applied to the direct mapping. Note that execute permissions are
  intentionally not applied to the direct mapping in order to make sure
  that all allocated pages within the direct mapping stay non-executable.

- module_alloc() passes the VM_FLUSH_RESET_PERMS flag to
  __vmalloc_node_range() to make sure that all implicit permission
  changes made to the direct mapping are reset when the allocated vm
  area is freed again.

Side effect: the direct mapping will be fragmented depending on how many
vm areas with VM_FLUSH_RESET_PERMS and/or explicit page permission changes
are allocated and freed again.

For example, just after boot of a system the direct mapping statistics
look like:

$ cat /proc/meminfo
...
DirectMap4k:      111628 kB
DirectMap1M:    16665600 kB
DirectMap2G:           0 kB

Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
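For illustration only, here is a minimal sketch of what a module_alloc()-style
caller gets out of this change. The helper alloc_ro_exec_page() is hypothetical
and not part of this patch; it mirrors the allocation path in the diff below and
assumes a kernel context where MODULE_ALIGN, MODULES_VADDR and MODULES_END are
available.

#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical helper: allocate one page the way module_alloc() does
 * after this patch and make it read-only and executable. The
 * set_memory_ro() call is now also applied to the direct mapping alias
 * of the page; set_memory_x() intentionally is not. When the area is
 * vfree()d, VM_FLUSH_RESET_PERMS makes sure the implicit permission
 * changes made to the direct mapping are reset again.
 */
static void *alloc_ro_exec_page(void)
{
        unsigned long addr;
        void *p;

        p = __vmalloc_node_range(PAGE_SIZE, MODULE_ALIGN,
                        MODULES_VADDR, MODULES_END,
                        GFP_KERNEL, PAGE_KERNEL,
                        VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
                        __builtin_return_address(0));
        if (!p)
                return NULL;
        addr = (unsigned long)p;
        /* ... fill in code here, then flip permissions ... */
        set_memory_ro(addr, 1);
        set_memory_x(addr, 1);
        return p;
}

Setting a single 4k page read-only in the direct mapping forces any larger
mapping (1M/2G) covering it to be split, which is where the fragmentation
visible in the DirectMap* counters above comes from.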
-rw-r--r--  arch/s390/kernel/module.c    7
-rw-r--r--  arch/s390/mm/pageattr.c     55
2 files changed, 54 insertions, 8 deletions
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 07a13546980d..adf3796f4dab 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -62,9 +62,10 @@ void *module_alloc(unsigned long size)
         if (PAGE_ALIGN(size) > MODULES_LEN)
                 return NULL;
         p = __vmalloc_node_range(size, MODULE_ALIGN,
-                        MODULES_VADDR + get_module_load_offset(), MODULES_END,
-                        gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
-                        __builtin_return_address(0));
+                        MODULES_VADDR + get_module_load_offset(),
+                        MODULES_END, gfp_mask, PAGE_KERNEL,
+                        VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
+                        NUMA_NO_NODE, __builtin_return_address(0));
         if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
                 vfree(p);
                 return NULL;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 0b196dea2d92..5ba3bd8a7b12 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -323,9 +323,6 @@ static int change_page_attr(unsigned long addr, unsigned long end,
         int rc = -EINVAL;
         pgd_t *pgdp;
 
-        if (addr == end)
-                return 0;
-        mutex_lock(&cpa_mutex);
         pgdp = pgd_offset_k(addr);
         do {
                 if (pgd_none(*pgdp))
@@ -336,18 +333,66 @@ static int change_page_attr(unsigned long addr, unsigned long end,
                         break;
                 cond_resched();
         } while (pgdp++, addr = next, addr < end && !rc);
-        mutex_unlock(&cpa_mutex);
+        return rc;
+}
+
+static int change_page_attr_alias(unsigned long addr, unsigned long end,
+                                  unsigned long flags)
+{
+        unsigned long alias, offset, va_start, va_end;
+        struct vm_struct *area;
+        int rc = 0;
+
+        /*
+         * Changes to read-only permissions on kernel VA mappings are also
+         * applied to the kernel direct mapping. Execute permissions are
+         * intentionally not transferred to keep all allocated pages within
+         * the direct mapping non-executable.
+         */
+        flags &= SET_MEMORY_RO | SET_MEMORY_RW;
+        if (!flags)
+                return 0;
+        area = NULL;
+        while (addr < end) {
+                if (!area)
+                        area = find_vm_area((void *)addr);
+                if (!area || !(area->flags & VM_ALLOC))
+                        return 0;
+                va_start = (unsigned long)area->addr;
+                va_end = va_start + area->nr_pages * PAGE_SIZE;
+                offset = (addr - va_start) >> PAGE_SHIFT;
+                alias = (unsigned long)page_address(area->pages[offset]);
+                rc = change_page_attr(alias, alias + PAGE_SIZE, flags);
+                if (rc)
+                        break;
+                addr += PAGE_SIZE;
+                if (addr >= va_end)
+                        area = NULL;
+        }
         return rc;
 }
 
 int __set_memory(unsigned long addr, int numpages, unsigned long flags)
 {
+        unsigned long end;
+        int rc;
+
         if (!MACHINE_HAS_NX)
                 flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
         if (!flags)
                 return 0;
+        if (!numpages)
+                return 0;
         addr &= PAGE_MASK;
-        return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
+        end = addr + numpages * PAGE_SIZE;
+        mutex_lock(&cpa_mutex);
+        rc = change_page_attr(addr, end, flags);
+        if (rc)
+                goto out;
+        rc = change_page_attr_alias(addr, end, flags);
+out:
+        mutex_unlock(&cpa_mutex);
+        return rc;
 }
 
 int set_direct_map_invalid_noflush(struct page *page)