author     Linus Torvalds    2022-06-05 10:57:35 -0700
committer  Linus Torvalds    2022-06-05 10:57:35 -0700
commit     0b7da15c21c2d4a37495fafc0350acec18f67d87 (patch)
tree       f5c747385e59d1fb26d469b53c3709491d21374d /arch/x86/mm/init_64.c
parent     9784edd73a08ea08d0ce5606e1f0f729df688c59 (diff)
parent     e19d11267f0e6c8aff2d15d2dfed12365b4c9184 (diff)
Merge tag 'x86-mm-2022-06-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm cleanup from Thomas Gleixner:
 "Use PAGE_ALIGNED() instead of open coding it in the x86/mm code"

* tag 'x86-mm-2022-06-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Use PAGE_ALIGNED(x) instead of IS_ALIGNED(x, PAGE_SIZE)
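The cleanup is purely cosmetic: PAGE_ALIGNED(x) is a shorthand for IS_ALIGNED(x, PAGE_SIZE). Below is a minimal user-space sketch of that equivalence; the macro bodies and PAGE_SIZE value are illustrative assumptions modeled on the kernel's definitions, not the actual kernel headers.

/* Sketch only: PAGE_SIZE and the macro bodies below are assumptions
 * modeled on the kernel's definitions, not copied from its headers. */
#include <assert.h>

#define PAGE_SIZE        4096UL                       /* assume 4 KiB pages */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)     /* power-of-two alignment check */
#define PAGE_ALIGNED(x)  IS_ALIGNED((unsigned long)(x), PAGE_SIZE)

int main(void)
{
	unsigned long start = 32 * PAGE_SIZE;   /* hypothetical page-aligned address */

	assert(IS_ALIGNED(start, PAGE_SIZE));   /* open-coded form replaced by the patch */
	assert(PAGE_ALIGNED(start));            /* equivalent helper the code now uses */
	return 0;
}

Both asserts pass or fail together for any address, which is why the change has no functional effect and only improves readability.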
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--    arch/x86/mm/init_64.c    8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index cb290a2f0747..39c5246964a9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1240,8 +1240,8 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct,
 void __ref vmemmap_free(unsigned long start, unsigned long end,
 		struct vmem_altmap *altmap)
 {
-	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
-	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+	VM_BUG_ON(!PAGE_ALIGNED(start));
+	VM_BUG_ON(!PAGE_ALIGNED(end));
 
 	remove_pagetable(start, end, false, altmap);
 }
@@ -1605,8 +1605,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 {
 	int err;
 
-	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
-	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+	VM_BUG_ON(!PAGE_ALIGNED(start));
+	VM_BUG_ON(!PAGE_ALIGNED(end));
 
 	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
 		err = vmemmap_populate_basepages(start, end, node, NULL);